import json
import os
import random

import jsonlines
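
# All build_dataset* helpers below read the same hard-coded prompt file; the
# path is hoisted into one module-level constant so it only has to change here.
PROMPT_PATH = '/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/prompt/reward_model_prompt.txt'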


def load_data_jsonl(data_path):
    """Load a JSONL file into a list of records."""
    data = []
    with open(data_path, "r", encoding="utf8") as f:
        for item in jsonlines.Reader(f):
            data.append(item)
    return data


def load_data(data_path):
    """Load a single JSON file."""
    with open(data_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    return data


def ensure_dir_exists(path):
    """Create the parent directory of `path` if it doesn't exist."""
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
        print(f"Created directory: {directory}")


def build_dataset(data_list, path):
    """Write pairwise comparison samples to `path` as JSONL; return the count."""
    with open(PROMPT_PATH, 'r') as f:
        PROMPT = f.read()

    dict_list = []
    for idx, d in enumerate(data_list):
        data_json = {'id': idx,
                     'image': d["image_list"],
                     'conversations': [
                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
                         {'from': 'gpt', 'value': d["label"]}
                     ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)
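
# Schematically, build_dataset emits one JSON object per line:
#   {"id": 0,
#    "image": ["<first image>", "<second image>"],
#    "conversations": [{"from": "human", "value": "<PROMPT> ..."},
#                      {"from": "gpt", "value": <label>}]}
# In the cross builders below the label is the position (0 or 1) of the
# original image; here it is taken directly from the input record.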


def build_dataset_multihead(data_list, path, mask):
    """Like build_dataset, but the target is [labels, mask]: the label is
    duplicated for both reward heads and `mask` flags the heads this dataset
    supervises (see mask_dict in __main__)."""
    with open(PROMPT_PATH, 'r') as f:
        PROMPT = f.read()

    dict_list = []
    for idx, d in enumerate(data_list):
        data_json = {'id': idx,
                     'image': d["image_list"],
                     'conversations': [
                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
                         {'from': 'gpt', 'value': [[d["label"]] * 2, mask]}
                     ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)


def build_dataset_cross(data_list, path, TYPE):
    """Re-pair each original image with a boring image drawn from the whole
    split, biased toward boring images with longer text (`TYPE` is unused but
    kept for a uniform call signature)."""
    with open(PROMPT_PATH, 'r') as f:
        PROMPT = f.read()

    dict_list = []
    origin_image_list = []
    boring_image_list = []
    origin_text_lengths = []
    boring_text_lengths = []
    for d in data_list:
        if d["label"] == 0:
            origin_image_list.append(d["image_list"][0])
            boring_image_list.append(d["image_list"][1])
            origin_text_lengths.append(d["text_lengths"][0])
            boring_text_lengths.append(d["text_lengths"][1])
        elif d["label"] == 1:
            origin_image_list.append(d["image_list"][1])
            boring_image_list.append(d["image_list"][0])
            origin_text_lengths.append(d["text_lengths"][1])
            boring_text_lengths.append(d["text_lengths"][0])
        else:
            raise ValueError("Wrong label")

    print('sorting the boring images')
    boring_with_lengths = list(zip(boring_image_list, boring_text_lengths))
    boring_with_lengths.sort(key=lambda x: x[1])

    print('generating the pairs')
    for idx, origin in enumerate(origin_image_list):
        original_length = origin_text_lengths[idx]

        # Index of the first boring image whose text is strictly longer than
        # the original's (the list is sorted by text length).
        longer_idx = 0
        while longer_idx < len(boring_with_lengths) and boring_with_lengths[longer_idx][1] <= original_length:
            longer_idx += 1

        # With 70% probability pick a boring image with longer text, otherwise
        # a shorter one; fall back to the whole list when no split is possible.
        # (Selection restored to match build_dataset_cross_multihead below.)
        if longer_idx < len(boring_with_lengths) and random.random() < 0.7:
            boring = random.choice(boring_with_lengths[longer_idx:])[0]
        elif longer_idx > 0:
            boring = random.choice(boring_with_lengths[:longer_idx])[0]
        else:
            boring = random.choice(boring_with_lengths)[0]

        # Randomize which side the original lands on; the label records its
        # position (0 = first image, 1 = second image).
        if random.choice(["pos", "neg"]) == 'pos':
            images, label = [origin, boring], 0
        else:
            images, label = [boring, origin], 1
        data_json = {'id': idx,
                     'image': images,
                     'conversations': [
                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
                         {'from': 'gpt', 'value': label}
                     ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)
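
# Design note (inferred from the sampling above): pairing each original with a
# boring image that usually has *longer* text should keep the reward model from
# scoring by text length alone. E.g. with sorted boring lengths [3, 5, 9] and
# an original of length 5, longer_idx lands on the 9, drawn 70% of the time.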


def build_dataset_cross_multihead(data_list, path, TYPE, mask):
    """Cross pairing as in build_dataset_cross, but with multihead targets
    [[label] * 2, mask] (`TYPE` is unused but kept for a uniform signature)."""
    with open(PROMPT_PATH, 'r') as f:
        PROMPT = f.read()

    dict_list = []
    origin_image_list = []
    boring_image_list = []
    origin_text_lengths = []
    boring_text_lengths = []
    for d in data_list:
        if d["label"] == 0:
            origin_image_list.append(d["image_list"][0])
            boring_image_list.append(d["image_list"][1])
            origin_text_lengths.append(d["text_lengths"][0])
            boring_text_lengths.append(d["text_lengths"][1])
        elif d["label"] == 1:
            origin_image_list.append(d["image_list"][1])
            boring_image_list.append(d["image_list"][0])
            origin_text_lengths.append(d["text_lengths"][1])
            boring_text_lengths.append(d["text_lengths"][0])
        else:
            raise ValueError("Wrong label")

    print('sorting the boring images')
    boring_with_lengths = list(zip(boring_image_list, boring_text_lengths))
    boring_with_lengths.sort(key=lambda x: x[1])

    print('generating the pairs')
    for idx, origin in enumerate(origin_image_list):
        original_length = origin_text_lengths[idx]

        # Index of the first boring image whose text is strictly longer than
        # the original's (the list is sorted by text length).
        longer_idx = 0
        while longer_idx < len(boring_with_lengths) and boring_with_lengths[longer_idx][1] <= original_length:
            longer_idx += 1

        # With 70% probability pick a boring image with longer text, otherwise
        # a shorter one; fall back to the whole list when no split is possible.
        if longer_idx < len(boring_with_lengths) and random.random() < 0.7:
            boring = random.choice(boring_with_lengths[longer_idx:])[0]
        elif longer_idx > 0:
            boring = random.choice(boring_with_lengths[:longer_idx])[0]
        else:
            boring = random.choice(boring_with_lengths)[0]

        # Randomize which side the original lands on; the label records its
        # position (0 = first image, 1 = second image).
        if random.choice(["pos", "neg"]) == 'pos':
            images, label = [origin, boring], 0
        else:
            images, label = [boring, origin], 1
        data_json = {'id': idx,
                     'image': images,
                     'conversations': [
                         {'from': 'human', 'value': f'{PROMPT}\nFirst image: <image>\nSecond image:<image>'},
                         {'from': 'gpt', 'value': [[label] * 2, mask]}
                     ]}
        dict_list.append(data_json)
    with open(path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
    return len(dict_list)


def build_json(dataset_path_list, length_list, name_list, json_path):
    """Write one dataset-metadata JSON object per line to `json_path`."""
    dict_list = []
    for dataset_path, length, name in zip(dataset_path_list, length_list, name_list):
        entry = {
            name: {
                "root": "",
                "annotation": dataset_path,
                "data_augment": False,
                "repeat_time": 1,
                "length": length
            }
        }
        dict_list.append(entry)

    with open(json_path, 'w', encoding='utf-8') as file:
        for entry in dict_list:
            json.dump(entry, file)
            file.write('\n')
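
# Each metadata line produced by build_json looks like:
#   {"<name>": {"root": "", "annotation": "<jsonl path>", "data_augment": false,
#               "repeat_time": 1, "length": <num samples>}}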


def split_train_test(data, train_path, test_path):
    """Shuffle `data` in place and write a 90/10 train/test split as JSON."""
    random.shuffle(data)

    split = int(len(data) * 0.9)
    selected_items = data[:split]
    unselected_items = data[split:]

    with open(train_path, 'w') as f:
        json.dump(selected_items, f)

    with open(test_path, 'w') as f:
        json.dump(unselected_items, f)

    return selected_items, unselected_items
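
# Hypothetical usage: split a raw pair list once, then build JSONL datasets
# from the persisted splits (paths below are illustrative only):
#   data = load_data('raw_pairs.json')
#   train, test = split_train_test(data, 'raw_data/train.json', 'raw_data/test.json')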


def split_train_test_original(original_dataset):
    """Split the original dataset 90/10 and persist the image filenames
    (used as ids) of each split."""
    original_data = load_data(original_dataset)
    random.shuffle(original_data)

    split = int(len(original_data) * 0.9)
    train_data_original = original_data[:split]
    test_data_original = original_data[split:]

    # Keep only the filename (last path component) as the image id.
    train_image_ids = []
    for item in train_data_original:
        filename = item["original_image"].split("/")[-1]
        train_image_ids.append(filename)

    test_image_ids = []
    for item in test_data_original:
        filename = item["original_image"].split("/")[-1]
        test_image_ids.append(filename)

    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_train_ids.jsonl', 'w') as f:
        json.dump(train_image_ids, f)

    with open('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_test_ids.jsonl', 'w') as f:
        json.dump(test_image_ids, f)
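
# NOTE: each *_ids.jsonl file above holds a single JSON array on one line, so
# load_data_jsonl returns a one-element list whose first item is that array.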


if __name__ == '__main__':
    NAME_list = ['object_add']
    TYPE_list = ['cross', '']

    # Head masks for the multihead variants: one flag per reward head; a 1
    # appears to mark the head supervised by that dataset.
    mask_dict = {
        'text_replaced': [1, 1],
        'lowperformancememe': [1, 0],
        'irrelevantmeme': [0, 1],
        'boringmeme': [1, 0]
    }
    for NAME in NAME_list:
        for TYPE in TYPE_list:
            # Source annotations for this dataset name.
            if NAME == 'lowperformancememe':
                dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/{NAME}.jsonl'
            elif NAME in ('text_replaced', 'boring_detailed'):
                dataset = f'/fs-computility/niuyazhe/lixueyan/meme/memetrash/Eimages_{NAME}.json'
            else:
                dataset = "/fs-computility/niuyazhe/shared/meme/data/meme/Eimages/Eimages_object_2.jsonl"

            original_dataset = '/fs-computility/niuyazhe/lixueyan/jmj/DIlab/meme/memetrash/processed_dections_Eimage_UPDATED.json'
            train_image_ids = load_data_jsonl('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_train_ids.jsonl')
            test_image_ids = load_data_jsonl('/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/Eimages_test_ids.jsonl')
            if TYPE != '':
                dataset_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/Ejson/{NAME}_{TYPE}_train.jsonl'
                dataset_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/Ejson/{NAME}_{TYPE}_test.jsonl'
                json_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/{NAME}_{TYPE}_train.jsonl'
                json_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}_{TYPE}/{NAME}_{TYPE}_test.jsonl'
            else:
                dataset_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/Ejson/{NAME}_train.jsonl'
                dataset_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/Ejson/{NAME}_test.jsonl'
                json_path_train = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/{NAME}_train.jsonl'
                json_path_test = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/{NAME}_test.jsonl'

            train_path = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/raw_data/train.json'
            test_path = f'/fs-computility/niuyazhe/lixueyan/meme/dataset-meme-rewardmodel/{NAME}/raw_data/test.json'

            ensure_dir_exists(dataset_path_train)
            ensure_dir_exists(dataset_path_test)
            ensure_dir_exists(json_path_train)
            ensure_dir_exists(json_path_test)
            ensure_dir_exists(train_path)
            ensure_dir_exists(test_path)

            # The raw 90/10 split at train_path/test_path is assumed to already
            # exist here (e.g. produced by an earlier split_train_test run).
            train_data = load_data(train_path)
            test_data = load_data(test_path)
            # Names like 'boringmeme' drop the trailing 'meme' in metadata keys.
            if 'meme' in NAME:
                name = NAME[:-4]
            else:
                name = NAME

            if TYPE == '':
                length_train = build_dataset(train_data, dataset_path_train)
                build_json([dataset_path_train], [length_train], [name], json_path_train)
                length_test = build_dataset(test_data, dataset_path_test)
                build_json([dataset_path_test], [length_test], [name], json_path_test)
            elif TYPE == 'cross':
                length_train = build_dataset_cross(train_data, dataset_path_train, NAME)
                build_json([dataset_path_train], [length_train], [name + '_' + TYPE], json_path_train)
                length_test = build_dataset_cross(test_data, dataset_path_test, NAME)
                build_json([dataset_path_test], [length_test], [name + '_' + TYPE], json_path_test)
            elif TYPE == 'align_multihead':
                length_train = build_dataset_multihead(train_data, dataset_path_train, mask_dict[NAME])
                build_json([dataset_path_train], [length_train], [name], json_path_train)
                length_test = build_dataset_multihead(test_data, dataset_path_test, mask_dict[NAME])
                build_json([dataset_path_test], [length_test], [name], json_path_test)
            elif TYPE == 'cross_multihead':
                length_train = build_dataset_cross_multihead(train_data, dataset_path_train, NAME, mask_dict[NAME])
                build_json([dataset_path_train], [length_train], [name + '_' + TYPE], json_path_train)
                length_test = build_dataset_cross_multihead(test_data, dataset_path_test, NAME, mask_dict[NAME])
                build_json([dataset_path_test], [length_test], [name + '_' + TYPE], json_path_test)

            print(f'Done {NAME} {TYPE}')