| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
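The rows that follow are truncated previews of this five-column table, exactly as rendered by the dataset viewer. As a minimal, hedged sketch (not the actual dataset; the row values are placeholders copied from the first preview row), the same schema can be reproduced in memory with the Hugging Face `datasets` library:

```python
# Minimal sketch (not the actual dataset): rebuild the five-column schema
# shown above as an in-memory datasets.Dataset. Row values are placeholders
# taken from the truncated previews below, purely for illustration.
from datasets import Dataset, Features, Value

features = Features(
    {
        "code": Value("string"),
        "code_codestyle": Value("int64"),
        "style_context": Value("string"),
        "style_context_codestyle": Value("int64"),
        "label": Value("int64"),
    }
)

example_rows = {
    "code": ['import argparse\nA : Tuple = "docs/source/_static/js/custom.js"\n...'],
    "code_codestyle": [705],
    "style_context": ["def lowercase_ ( _A : int ):\n    ...\n"],
    "style_context_codestyle": [5],
    "label": [0],
}

ds = Dataset.from_dict(example_rows, features=features)
print(ds)              # Dataset with 5 columns and 1 row
print(ds.features)     # the declared column types
print(ds[0]["label"])  # 0
```

The `features` argument pins the column dtypes so they match the header above rather than being inferred from the placeholder values.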
import argparse
A : Tuple = "docs/source/_static/js/custom.js"
def lowercase_ ( _A : List[Any] ):
"""simple docstring"""
with open(_A , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase__ : Tuple = f.readlines()
lowerC... | 705 |
def lowercase_ ( _A : int ):
"""simple docstring"""
if not isinstance(_A , _A ):
lowerCamelCase__ : List[str] = F"Input value of [number={number}] must be an integer"
raise TypeError(_A )
if number < 0:
retu... | 5 | 0 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url... | 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A : Optional[int] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT... | 5 | 0 |
def lowercase_ ( _A : int = 10 , _A : int = 1000 , _A : bool = True ):
"""simple docstring"""
assert (
isinstance(_A , _A )
and isinstance(_A , _A )
and isinstance(_A , _A )
), ... | 707 |
from __future__ import annotations
import time
import numpy as np
A : Dict = [8, 5, 9, 7]
A : Optional[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A : Any = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1... | 5 | 0 |
import os
def lowercase_ ( _A : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = len(grid[0] )
lowerCamelCase__ : List[str] = len(_A )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Optional[int] ... | 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sent... | 5 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Optional[int] = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",... | 709 |
import cva
import numpy as np
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : float , __lowerCamelCase : int ):
'''simple docstring'''
... | 5 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
fr... | 710 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, id... | 5 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common i... | 711 |
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," ... | 5 | 0 |
A : Optional[int] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : Any = [{"type": "code", "content": INSTALL_CONTENT}]
A : str = {
... | 712 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Autom... | 5 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .token... | 713 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url... | 5 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/m... | 714 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCa... | 5 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMSchedule... | 715 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ... | 5 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTok... | 716 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggin... | 5 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : Any = logging.get_logger(__name__)
class _lowercase ( lowercase__ , lowercase__):
... | 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_t... | 5 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCamelCase__ ( ):
"""simple docstring"""
raise RuntimeError("CUDA out of memory." )
... | 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "ht... | 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
... | 719 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
"roberta-base": "https://huggingfa... | 5 | 0 |
from __future__ import annotations
def lowercase_ ( _A : list[int] ):
"""simple docstring"""
if not nums:
return 0
lowerCamelCase__ : List[str] = nums[0]
lowerCamelCase__ : str = 0
for num in nums[1:]:
lowerCamelCase... | 720 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tok... | 5 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless require... | 721 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, loggi... | 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A : List[str] = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
A : List[Any] = _L... | 700 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, ... | 5 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .im... | 701 |
def lowercase_ ( _A : int , _A : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
lowerCamelCase__ : List[str] = str(bin(_A ) )[2:] # remove the leading "0b"
... | 5 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logg... | 702 |
import os
from pathlib import Path
def lowercase_ ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
lowerCamelCase__ : Any = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
lowerCamelCase__ : Optiona... | 5 | 0 |
import os
from collections.abc import Iterator
def lowercase_ ( _A : str = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_A ):
lowerCamelCase__ : Union[str, Any] = [d for d in dir_names if d != "scripts" and d[0] not... | 703 |
import os
from datetime import datetime as dt
from github import Github
A : Union[str, Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowercase_ ( ):
... | 5 | 0 |
from __future__ import annotations
def lowercase_ ( _A : list[int | float] , _A : int , _A : int ):
"""simple docstring"""
if len(_A ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
... | 704 |
from __future__ import annotations
def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ):
"""simple docstring"""
lowerCamelCase__ : Tuple = ... | 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A : Tuple = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise... | 705 |
def lowercase_ ( _A : int ):
"""simple docstring"""
if not isinstance(_A , _A ):
lowerCamelCase__ : List[str] = F"Input value of [number={number}] must be an integer"
raise TypeError(_A )
if number < 0:
retu... | 5 | 0 |
def lowercase_ ( _A : int , _A : list ):
"""simple docstring"""
_enforce_args(_A , _A )
if n == 0:
return 0
lowerCamelCase__ : Any = float("-inf" )
for i in range(1 , n + 1 ):
lowerCamelCase__ : List[str] = max... | 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A : Optional[int] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT... | 5 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _lowercase :
"""simple docstring"""
A__ = 42
A__ = No... | 707 |
from __future__ import annotations
import time
import numpy as np
A : Dict = [8, 5, 9, 7]
A : Optional[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A : Any = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1... | 5 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A : List[Any] = lo... | 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sent... | 5 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
A : Dict = log... | 709 |
import cva
import numpy as np
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : float , __lowerCamelCase : int ):
'''simple docstring'''
... | 5 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplif... | 710 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, id... | 5 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ... | 711 |
import os
def lowercase_ ( _A : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file:
lowerCamelCase__ : List[Any] = [
[int(_A ) for element in line.split("," ... | 5 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowercase_ ( _A : List[str] , _A : str , _A : str , _A : Path , _A : str = No... | 712 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Autom... | 5 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow ... | 713 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A : Optional[int] = requests.get(url... | 5 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A : Tuple = logging.get_logger(__name__)
# TODO: upload to AWS
A : List[Any] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/mai... | 714 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCa... | 5 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import Tokenize... | 715 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ... | 5 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
A : Union[str, Any] = logging.get_logger(__name__)
A : ... | 716 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"facebook/xmod-base": "https://huggin... | 5 | 0 |
def lowercase_ ( _A : int = 3 , _A : int = 7 , _A : int = 1000000 ):
"""simple docstring"""
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[str] = 1
for current_denominator in range(1 , limit + 1 ... | 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_t... | 5 | 0 |
from typing import List
import numpy as np
def UpperCamelCase__ ( _A : dict ):
"""simple docstring"""
lowerCamelCase__ : Any = {key: len(_A ) for key, value in gen_kwargs.items() if isinstance(_A , _A )}
if len(set(lists_lengths.values() )... | 718 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Dict = {
"kssteven/ibert-roberta-base": "ht... | 5 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"fa... | 719 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Union[str, Any] = {
"roberta-base": "https://huggingfa... | 5 | 0 |
A : Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : int = [{"type": ... | 720 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tok... | 5 | 0 |
import argparse
import struct
import unittest
class _lowercase :
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : bytes ):
'''simple docstring'''
lowerCamelCase__ : Op... | 721 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, loggi... | 5 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
l... | 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] ... | 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> list[int]:
UpperCamelCase__ : Optional[Any] = len(lowerCamelCase_)
for i in range(lowerCamelCase_):
for j in range(i + 1 , lowerCamelCase_):
if numbers[j] < numbers[i]... | 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = loggi... | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : str , UpperCAmelCase_ : int = 0):
UpperCamelCase__ : Optional[int] = key
def __UpperCamelCase ( self : ... | 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod(... | 6 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers... | 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
cl... | 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig... | 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcesso... | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> float:
UpperCamelCase__ : Dict = sorted(numsa + numsa)
UpperCamelCase__, UpperCamelCase__ : Dict = ... | 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_a... | 6 | 1 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset... | 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any ... | 6 | 1 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, ... | 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, Radi... | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : str):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = tex... | 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipel... | 6 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class __lowercase (__lowerCamelCa... | 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.s... | 6 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video... | 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed fro... | 6 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models... | 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloy... | 6 | 1 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = ''''''
_lowerCam... | 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root o... | 6 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
... | 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokeniz... | 6 | 1 |
'''simple docstring'''
import argparse
import copy
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : List[str] = {}
with open(lowerCamelCase_) as f:
for line in f:
if line.split()[0] not in dict_of_nei... | 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionM... | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('KT')
lowerCAmelCase__ = TypeVar('VT')
class __lowercase (Generic[KT, VT] ):
def _... | 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transform... | 6 | 1 |
'''simple docstring'''
import operator
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None) -> list:
UpperCamelCase__ : Union[str, Any] = operator.lt if reverse else operator.gt
Upper... | 6 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Opt... | 6 | 1 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
cl... | 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassif... | 6 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.... | 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCas... | 6 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapER... | 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ... | 6 | 1 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcess... | 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowe... | 6 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase (__lowerCamelCase ):
def __init__( self : Un... | 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_uti... | 6 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase (__lowerCamelCase ):
def __init__( self : ... | 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] ... | 6 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBen... | 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = loggi... | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = None
_lowerCamelCase = ... | 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod(... | 6 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@... | 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
cl... | 6 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
exc... | 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcesso... | 6 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmel... | 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_a... | 6 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCAmelCase__ = ... | 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any ... | 6 | 1 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')... | 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, Radi... | 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> list:
UpperCamelCase__ : List[str] = len(lowerCamelCase_)
for i in range(1 , lowerCamelCase_):
UpperCamelCase__ : str = collection[i]
UpperCamelCase__ ... | 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipel... | 6 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generatio... | 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.s... | 6 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/mic... | 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed fro... | 6 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = {
'facebook/mask2former-swin-small-coco-instance': (
... | 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloy... | 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> float:
if mass < 0:
raise ValueError('The mass of a body cannot be negative')
return 0.5 * mass * abs(lowerCamelCase_) * abs(lowerCamelCase_)
if __name__ == "__main__... | 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root o... | 6 | 1 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] ... | 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokeniz... | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowercase :
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0):
UpperCa... | 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionM... | 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCa... | 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transform... | 6 | 1 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : list):
UpperCamelCase__ : Any = set_counts
UpperCamelCase__ : Optional[int] = max(UpperCAmelCase_)
UpperCamelCase__ : ... | 6 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Opt... | 6 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, bu... | 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassif... | 6 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
... | 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCas... | 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Optional[Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __UpperCAmelCase ( lowerCamelCase_ = ... | 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ... | 6 | 1 |
'''simple docstring'''
import os
def __UpperCAmelCase ( ) -> str:
with open(os.path.dirname(lowerCamelCase_) + '/p022_names.txt') as file:
UpperCamelCase__ : Optional[int] = str(file.readlines()[0])
UpperCamelCase__ : Optiona... | 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowe... | 6 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : List[str] = Path(lowerCamelCase_)
UpperCamelCase__ ... | 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_uti... | 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_vision_text_dual_encoder':... | 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] ... | 6 | 1 |
'''simple docstring'''
lowerCAmelCase__ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transfo... | 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = loggi... | 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> list:
if any(not isinstance(lowerCamelCase_ , lowerCamelCase_) or x < 0 for x in sequence):
raise TypeError('Sequence must be list of non-negative integers')
for _ in range(len(lowerCamelCa... | 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod(... | 6 | 1 |
'''simple docstring'''
import math
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCamelCase_)
e... | 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
cl... | 6 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
... | 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcesso... | 6 | 1 |
'''simple docstring'''
lowerCAmelCase__ = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install ... | 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_a... | 6 | 1 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase__ = pytest.mark.integratio... | 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any ... | 6 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ... | 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, Radi... | 6 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipel... | 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipel... | 6 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMix... | 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.s... | 6 | 1 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_byt... | 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed fro... | 6 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any ... | 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloy... | 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
... | 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root o... | 6 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase__ = Lock()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , ... | 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokeniz... | 6 | 1 |
'''simple docstring'''
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionM... | 6 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_to... | 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transform... | 6 | 1 |
'''simple docstring'''
from torch import nn
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
... | 6 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Opt... | 6 | 1 |