| python_code | repo_name | file_path |
|---|---|---|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-... | transformers-main | examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py |
#!/usr/bin/env python3
import logging
import sys
import time
from dataclasses import field
from pathlib import Path
from typing import Dict, List, Optional, Union
import flax
import jax
import jax.numpy as jnp
import librosa
import numpy as np
import optax
from datasets import DatasetDict, load_dataset
from flax impor... | transformers-main | examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The Google Research Authors and The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://... | transformers-main | examples/research_projects/jax-projects/model_parallel/partitions.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-... | transformers-main | examples/research_projects/jax-projects/model_parallel/run_clm_mp.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-... | transformers-main | examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py |
# Copyright 2022 - Intel Corp. All rights reserved.
# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Beckage
"""
Implementation of a new method for fine-tuning transformer models, which we call
Information Gain Filtration (IGF), on the WikiText data set; the results are
compared with the standard fine-tuning method.
Steps... | transformers-main | examples/research_projects/information-gain-filtration/run_clm_igf.py |
# Copyright 2022 - Intel Corp. All rights reserved.
# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Beckage
import copy
import logging
import random
import joblib
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AdamW, G... | transformers-main | examples/research_projects/information-gain-filtration/igf/igf.py |
transformers-main | examples/research_projects/information-gain-filtration/igf/__init__.py | |
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless require... | transformers-main | examples/research_projects/performer/run_mlm_performer.py |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab... | transformers-main | examples/research_projects/performer/modeling_flax_performer_utils.py |
# coding=utf-8
# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
... | transformers-main | examples/research_projects/performer/modeling_flax_performer.py |
#! /usr/bin/python3
import argparse
import logging
import os
import sys
from collections import namedtuple
import torch
from modeling_bertabs import BertAbs, build_predictor
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import BertTokenizer
from .utils_summarizati... | transformers-main | examples/research_projects/bertabs/run_summarization.py |
transformers-main | examples/research_projects/bertabs/__init__.py | |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... | transformers-main | examples/research_projects/bertabs/convert_bertabs_original_pytorch_checkpoint.py |
# coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.a... | transformers-main | examples/research_projects/bertabs/configuration_bertabs.py |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
# ------------
# Data loading
# ------------
class CNNDMDataset(Dataset):
"""Abstracts the dataset used to train seq2seq models.
The class will process the documents that are located in the specified
folder. The ... | transformers-main | examples/research_projects/bertabs/utils_summarization.py |
# MIT License
# Copyright (c) 2019 Yang Liu and the HuggingFace team
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, c... | transformers-main | examples/research_projects/bertabs/modeling_bertabs.py |
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | transformers-main | examples/research_projects/bertabs/test_utils_summarization.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | transformers-main | examples/research_projects/luke/run_luke_ner_no_trainer.py |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(seq... | transformers-main | examples/research_projects/luke/luke_utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | transformers-main | examples/research_projects/adversarial/run_hans.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | transformers-main | examples/research_projects/adversarial/utils_hans.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.ap... | transformers-main | examples/research_projects/onnx/summarization/run_onnx_exporter.py |
"""
Code to remove duplicate initializers to reduce ONNX model size.
"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
name_a = a.name
name_b = b.name
a.name = ""
b.name = ""
res = a == b
a.name = name_a
b.name = name_b
return res
def _node_replace_input_... | transformers-main | examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py |
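The `_is_equal_tensor_proto` helper above compares two ONNX tensor protos while ignoring their names. A minimal sketch of how such a comparison can drive initializer deduplication — `_deduplicate_initializers` is a hypothetical helper written for illustration, not this file's actual implementation:

```python
import onnx

def _deduplicate_initializers(model: onnx.ModelProto) -> None:
    # Map serialized tensor bytes (name stripped, as in _is_equal_tensor_proto)
    # to a canonical initializer name.
    kept, duplicates = {}, {}
    for init in model.graph.initializer:
        name = init.name
        init.name = ""
        key = init.SerializeToString()
        init.name = name
        if key in kept:
            duplicates[name] = kept[key]
        else:
            kept[key] = name
    # Drop duplicate tensors and rewire node inputs to the canonical names.
    for init in [i for i in model.graph.initializer if i.name in duplicates]:
        model.graph.initializer.remove(init)
    for node in model.graph.node:
        node.input[:] = [duplicates.get(i, i) for i in node.input]
```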
import copy
import itertools
from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from transformers import BartConfig
from transformers.generation import GenerationMixin
def _convert_past_list_to_tuple(past_key_values):
"""
In Bart model, the type of past_key_values is tuple... | transformers-main | examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py |
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/license... | transformers-main | examples/research_projects/lxmert/processing_image.py |
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2 && Huggingface Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www... | transformers-main | examples/research_projects/lxmert/modeling_frcnn.py |
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :)
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://w... | transformers-main | examples/research_projects/lxmert/utils.py |
"""
coding=utf-8
Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal
Adapted From Facebook Inc, Detectron2
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/license... | transformers-main | examples/research_projects/lxmert/visualizing_image.py |
import getopt
import json
import os
# import numpy as np
import sys
from collections import OrderedDict
import datasets
import numpy as np
import torch
from modeling_frcnn import GeneralizedRCNN
from processing_image import Preprocess
from utils import Config
"""
USAGE:
``python extracting_data.py -i <img_dir> -o ... | transformers-main | examples/research_projects/lxmert/extracting_data.py |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
... | transformers-main | examples/research_projects/codeparrot/examples/train_complexity_predictor.py |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoToke... | transformers-main | examples/research_projects/codeparrot/scripts/initialize_model.py |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torc... | transformers-main | examples/research_projects/codeparrot/scripts/human_eval.py |
from arguments import TokenizerTrainingArguments
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, HfArgumentParser
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
# Iterator for Training
def batch_iterator(batch_size=10):
for _ in tqdm(range(... | transformers-main | examples/research_projects/codeparrot/scripts/bpe_training.py |
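`batch_iterator` streams batches of raw text for tokenizer training. A hedged sketch of how such an iterator is typically fed to `train_new_from_iterator`; the dataset name, text column, batch counts, and vocabulary size here are assumptions for illustration:

```python
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer

dataset = load_dataset("codeparrot/codeparrot-clean-train", split="train", streaming=True)
iter_dataset = iter(dataset)

def batch_iterator(batch_size=10, n_batches=1000):
    for _ in tqdm(range(n_batches)):
        yield [next(iter_dataset)["content"] for _ in range(batch_size)]

# Retrain a GPT-2-style BPE tokenizer on the streamed code corpus.
base_tokenizer = AutoTokenizer.from_pretrained("gpt2")
new_tokenizer = base_tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=32768)
new_tokenizer.save_pretrained("codeparrot-tokenizer")
```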
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set... | transformers-main | examples/research_projects/codeparrot/scripts/validation_loss.py |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
"""
    Configuration for training a model.
"""
model_ckpt: Optional[str] = field(
default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
)
save... | transformers-main | examples/research_projects/codeparrot/scripts/arguments.py |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import ... | transformers-main | examples/research_projects/codeparrot/scripts/minhash_deduplication.py |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoToke... | transformers-main | examples/research_projects/codeparrot/scripts/preprocessing.py |
import logging
import os
import time
from argparse import Namespace
from pathlib import Path
import datasets
import torch
from accelerate import Accelerator, DistributedType
from accelerate.utils import ProjectConfiguration
from arguments import TrainingArguments
from datasets import load_dataset
from huggingface_hub ... | transformers-main | examples/research_projects/codeparrot/scripts/codeparrot_training.py |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
output = {}
output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
output["ratio... | transformers-main | examples/research_projects/codeparrot/scripts/pretokenizing.py |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content"... | transformers-main | examples/research_projects/codeparrot/scripts/tests/test_deduplicate.py |
transformers-main | examples/research_projects/codeparrot/scripts/tests/__init__.py | |
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
# Copyright 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# htt... | transformers-main | examples/research_projects/quantization-qdqbert/trainer_quant_qa.py |
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless require... | transformers-main | examples/research_projects/quantization-qdqbert/utils_qa.py |
import os
import time
import numpy as np
import onnxruntime as ort
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimiz... | transformers-main | examples/research_projects/quantization-qdqbert/ort-infer-benchmark.py |
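Those environment variables switch ONNX Runtime's TensorRT execution provider to INT8 and enable engine caching. A hedged sketch of the session creation and timing loop that typically follows in such a benchmark; the model path, input names, and shapes are assumptions:

```python
import time
import numpy as np
import onnxruntime as ort

sess_opt = ort.SessionOptions()
sess = ort.InferenceSession(
    "model.onnx",
    sess_options=sess_opt,
    providers=["TensorrtExecutionProvider", "CUDAExecutionProvider"],
)
input_ids = np.ones((1, 128), dtype=np.int64)
attention_mask = np.ones((1, 128), dtype=np.int64)
# Warm up once so TensorRT engine compilation is excluded from the timing.
sess.run(None, {"input_ids": input_ids, "attention_mask": attention_mask})
start = time.time()
for _ in range(100):
    sess.run(None, {"input_ids": input_ids, "attention_mask": attention_mask})
print(f"average latency: {(time.time() - start) / 100 * 1000:.2f} ms")
```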
# coding=utf-8
# Copyright 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required... | transformers-main | examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py |
# coding=utf-8
# Copyright 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required... | transformers-main | examples/research_projects/quantization-qdqbert/quant_trainer.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
# Copyright 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the ... | transformers-main | examples/research_projects/quantization-qdqbert/run_quant_qa.py |
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless require... | transformers-main | examples/research_projects/mlm_wwm/run_mlm_wwm.py |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
#... | transformers-main | examples/research_projects/mlm_wwm/run_chinese_ref.py |
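`_is_chinese_char` implements the standard CJK codepoint-range test. A sketch using the ranges from the BERT reference implementation; this file's exact list may differ slightly:

```python
def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    return (
        (0x4E00 <= cp <= 0x9FFF)       # CJK Unified Ideographs
        or (0x3400 <= cp <= 0x4DBF)    # Extension A
        or (0x20000 <= cp <= 0x2A6DF)  # Extension B
        or (0x2A700 <= cp <= 0x2B73F)  # Extension C
        or (0x2B740 <= cp <= 0x2B81F)  # Extension D
        or (0x2B820 <= cp <= 0x2CEAF)  # Extension E
        or (0xF900 <= cp <= 0xFAFF)    # CJK Compatibility Ideographs
        or (0x2F800 <= cp <= 0x2FA1F)  # Compatibility Ideographs Supplement
    )
```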
#!/usr/bin/env python
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calcula... | transformers-main | examples/research_projects/seq2seq-distillation/run_eval.py |
import re
from filelock import FileLock
try:
import nltk
NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
... | transformers-main | examples/research_projects/seq2seq-distillation/sentence_splitter.py |
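The truncated helper re-splits text so that every sentence ends with a newline, which `rouge_score` expects when computing summary-level ROUGE-Lsum. A sketch of a plausible body; the `<n>` token is Pegasus's newline marker, and the real file additionally guards on `NLTK_AVAILABLE`:

```python
import re
import nltk

def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # strip the Pegasus newline token
    return "\n".join(nltk.sent_tokenize(x))
```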
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, des... | transformers-main | examples/research_projects/seq2seq-distillation/make_student.py |
import argparse
import logging
import os
import sys
import tempfile
from pathlib import Path
import lightning_base
import pytest
import pytorch_lightning as pl
import torch
from convert_pl_checkpoint_to_hf import convert_pl_to_hf
from distillation import distill_main
from finetune import SummarizationModule, main
from... | transformers-main | examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
... | transformers-main | examples/research_projects/seq2seq-distillation/lightning_base.py |
# Because of their complexity, multi-GPU tests could impact other tests; to aid debugging, we keep them in a separate module.
import os
import sys
from pathlib import Path
import torch
from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multi_gpu
from utils import load_json
C... | transformers-main | examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py |
import itertools
import json
import linecache
import math
import os
import pickle
import socket
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Tuple, Union
import git
import numpy as np
import torch
import torch.distributed as dist
from rouge_score import roug... | transformers-main | examples/research_projects/seq2seq-distillation/utils.py |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils import save_json
def count_trainable_parameters(model):
model_parameters... | transformers-main | examples/research_projects/seq2seq-distillation/callbacks.py |
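The truncated `count_trainable_parameters` is conventionally completed by summing element counts over parameters with `requires_grad` set; a sketch:

```python
import numpy as np

def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    return sum(int(np.prod(p.size())) for p in model_parameters)
```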
#!/usr/bin/env python
import os
from pathlib import Path
from typing import Dict, List
import fire
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.utils.logging import get_logger
logger = get_logger(__name__)
def remove_prefix(text: str, prefix: str):
if text.star... | transformers-main | examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py |
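`remove_prefix` is typically a manual reimplementation of `str.removeprefix`, which only became available in Python 3.9. A likely completion of the truncated body:

```python
def remove_prefix(text: str, prefix: str):
    if text.startswith(prefix):
        return text[len(prefix) :]
    return text  # fall through: the prefix was not present
```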
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-t... | transformers-main | examples/research_projects/seq2seq-distillation/_test_make_student.py |
#!/usr/bin/env python
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from tran... | transformers-main | examples/research_projects/seq2seq-distillation/_test_bash_script.py |
#!/usr/bin/env python
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoin... | transformers-main | examples/research_projects/seq2seq-distillation/finetune.py |
#!/usr/bin/env python
import argparse
import gc
import os
import sys
from pathlib import Path
from typing import List # noqa: F401
import pytorch_lightning as pl
import torch
from finetune import SummarizationModule, TranslationModule
from finetune import main as ft_main
from make_student import create_student_by_co... | transformers-main | examples/research_projects/seq2seq-distillation/distillation.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | transformers-main | examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LI... | transformers-main | examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py |
#!/usr/bin/env python3
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
"""DO NOT CHANGE. This function computes and logs t... | transformers-main | examples/research_projects/robust-speech-event/eval.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a cop... | transformers-main | examples/research_projects/distillation/run_squad_w_distillation.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Un... | transformers-main | examples/research_projects/distillation/lm_seqs_dataset.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Un... | transformers-main | examples/research_projects/distillation/grouped_batch_sampler.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Un... | transformers-main | examples/research_projects/distillation/distiller.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Un... | transformers-main | examples/research_projects/distillation/utils.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | transformers-main | examples/research_projects/distillation/train.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | transformers-main | examples/research_projects/distillation/scripts/token_counts.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | transformers-main | examples/research_projects/distillation/scripts/binarized_data.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | transformers-main | examples/research_projects/distillation/scripts/extract_distilbert.py |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | transformers-main | examples/research_projects/distillation/scripts/extract.py |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-... | transformers-main | examples/research_projects/layoutlmv3/run_funsd_cord.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses... | transformers-main | examples/research_projects/mm-imdb/utils_mmimdb.py |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses... | transformers-main | examples/research_projects/mm-imdb/run_mmimdb.py |
"""Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py"""
import argparse
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
import... | transformers-main | examples/research_projects/rag/finetune_rag.py |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transfo... | transformers-main | examples/research_projects/rag/utils_rag.py |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
model_paramet... | transformers-main | examples/research_projects/rag/callbacks_rag.py |
"""
This script reads DPR retriever training data and parses each datapoint. We save a line per datapoint.
Each line consists of the query followed by a tab-separated list of Wikipedia page titles constituting
positive contexts for a given query.
"""
import argparse
import json
from tqdm import tqdm
def main():
... | transformers-main | examples/research_projects/rag/parse_dpr_relevance_data.py |
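Following the docstring, `main` would emit one line per datapoint: the query, then a tab-separated list of positive-context page titles. A minimal sketch under the standard DPR JSON field names (`question`, `positive_ctxs`, `title`), which are assumptions here:

```python
import json
from tqdm import tqdm

def parse_relevance_data(src_path: str, out_path: str) -> None:
    with open(src_path) as src, open(out_path, "w") as out:
        for dpr_record in tqdm(json.load(src)):
            question = dpr_record["question"]
            titles = [ctx["title"] for ctx in dpr_record["positive_ctxs"]]
            out.write(question + "\t" + "\t".join(titles) + "\n")
```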
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
... | transformers-main | examples/research_projects/rag/lightning_base.py |
import os
import sys
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)))
| transformers-main | examples/research_projects/rag/__init__.py |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
def __init__(self):
self.initialized = False
def create_rag_retriever(sel... | transformers-main | examples/research_projects/rag/distributed_ray_retriever.py |
""" Evaluation script for RAG models."""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as trans... | transformers-main | examples/research_projects/rag/eval_rag.py |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
"""
A distributed retriever built on top of ... | transformers-main | examples/research_projects/rag/distributed_pytorch_retriever.py |
import json
import os
import shutil
import sys
import tempfile
import unittest
from unittest import TestCase
from unittest.mock import patch
import faiss
import numpy as np
from datasets import Dataset
from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig
from transform... | transformers-main | examples/research_projects/rag/test_distributed_retriever.py |
"""
A script creating a RAG checkpoint from a generator and a question encoder checkpoints.
"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
model_type,
generator_name_or_path: str,
... | transformers-main | examples/research_projects/rag/consolidate_rag_checkpoint.py |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.b... | transformers-main | examples/research_projects/rag/_test_finetune_rag.py |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import (
DPRCo... | transformers-main | examples/research_projects/rag/use_own_knowledge_dataset.py |
# Parts of the code are adapted from the snippets provided in the TorchAudio Wav2Vec forced alignment tutorial.
# The full tutorial can be found here: https://pytorch.org/audio/stable/tutorials/forced_alignment_tutorial.html
import argparse
import os
from dataclasses import dataclass
import torch
import torchaudio
fr... | transformers-main | examples/research_projects/wav2vec2/alignment.py |
#!/usr/bin/env python3
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
... | transformers-main | examples/research_projects/wav2vec2/run_pretrain.py |
#!/usr/bin/env python3
import logging
import pathlib
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Set, Union
import datasets
import librosa
import numpy as np
import torch
from lang_trans import arabic
from packaging import version
from torch imp... | transformers-main | examples/research_projects/wav2vec2/run_asr.py |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | transformers-main | examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py |
#!/usr/bin/env python3
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from tr... | transformers-main | examples/research_projects/wav2vec2/run_common_voice.py |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import List, Optional
import torch
from datasets import Dataset
from torch import nn
from tqdm.auto import tqdm
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
HfArgumentParser,
Train... | transformers-main | examples/research_projects/zero-shot-distillation/distill_classifier.py |
#! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless ... | transformers-main | examples/research_projects/pplm/run_pplm.py |
from torch import nn
class ClassificationHead(nn.Module):
"""Classification Head for transformer encoders"""
def __init__(self, class_size, embed_size):
super().__init__()
self.class_size = class_size
self.embed_size = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size... | transformers-main | examples/research_projects/pplm/pplm_classification_head.py |
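A hedged completion of the head: the commented-out `mlp1` suggests a two-layer variant was tried, but a single linear projection is the simplest forward pass consistent with the constructor above; the layer choice is an assumption:

```python
from torch import nn

class ClassificationHead(nn.Module):
    """Classification Head for transformer encoders"""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # Project the encoder's hidden state straight to class logits.
        return self.mlp(hidden_state)
```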