Dataset column schema (29 columns; "nullable" marks columns that contain nulls, shown as ⌀ in the original dump):

| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 4–1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4–209 |
| max_stars_repo_name | stringlengths | 5–121 |
| max_stars_repo_head_hexsha | stringlengths | 40–40 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count | int64 | 1–191k, nullable |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24, nullable |
| max_issues_repo_path | stringlengths | 4–209 |
| max_issues_repo_name | stringlengths | 5–121 |
| max_issues_repo_head_hexsha | stringlengths | 40–40 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count | int64 | 1–67k, nullable |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24, nullable |
| max_forks_repo_path | stringlengths | 4–209 |
| max_forks_repo_name | stringlengths | 5–121 |
| max_forks_repo_head_hexsha | stringlengths | 40–40 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count | int64 | 1–105k, nullable |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24, nullable |
| content | stringlengths | 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |
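The records below are rows of this table, with the `content` column holding the full source file. As a rough illustration of how such rows could be inspected, assuming a Hugging Face `datasets`-style layout, a minimal sketch follows; the dataset identifier is a placeholder, not the real name of this dataset.

```python
# Sketch only: "org/python-code-dump" is a placeholder dataset id, not the real one.
from datasets import load_dataset

ds = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in ds.take(3):
    # Each row carries repo metadata plus the raw file in `content`
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["size"], row["alphanum_fraction"])
    print(row["content"][:200])  # first 200 characters of the source file
```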
hexsha: 4414ffb6ed6ec95ee5ef9886e9cc643654473f99 | size: 45,372 | ext: py | lang: Python | path: samt/samt.py | repo: Killerhaschen/marvin-telegram-bot | head_hexsha: c65e890a00450ed6ed4312d93e259db0c080ef6d | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null
import asyncio
import logging
import math
import platform
import signal
import sys
import traceback
import types
from collections import deque
from inspect import iscoroutinefunction, isgenerator, isasyncgen
from os import path, system
from typing import Any, Dict, Callable, Tuple, Iterable, Union, Collection
import aiotask_context as _context
import collections.abc
import telepot
import telepot.aio.delegate
import toml
from telepot.aio.loop import MessageLoop
from telepot.exception import TelegramError
from telepot.namedtuple import InlineKeyboardButton, InlineKeyboardMarkup, KeyboardButton, ReplyKeyboardMarkup, \
ReplyKeyboardRemove
from samt.helper import *
logger = logging.getLogger(__name__)
def _load_configuration(filename: str) -> dict:
"""
Loads the main configuration file from disk
:param filename: The name of the user configuration file
:return: The configuration as a dictionary
"""
script_path = path.dirname(path.realpath(sys.argv[0]))
return toml.load(f"{script_path}/config/{filename}.toml")
def _config_value(*keys, default: Any = None) -> Any:
"""
Safely accesses any key in the configuration and returns a default value if it is not found
:param keys: The keys to the config dictionary
:param default: The value to return if nothing is found
:return: Either the desired or the default value
"""
# Traverse through the dictionaries
step = _config
for key in keys:
try:
# Try to go one step deeper
step = step[key]
# A key error will abort the operation and return the default value
except KeyError:
return default
return step
class Bot:
"""
The main class of this framework
"""
_on_termination = lambda: None
def __init__(self):
"""
Initialize the framework using the configuration file(s)
"""
# Read configuration
global _config
try:
_config = _load_configuration("config")
except FileNotFoundError:
logger.critical("The configuration file could not be found. Please make sure there is a file called " +
"config.toml in the directory config.")
quit(-1)
# Initialize logger
self._configure_logger()
# Read language files
if _config_value('bot', 'language_feature', default=False):
try:
_Session.language = _load_configuration("lang")
except FileNotFoundError:
logger.critical("The language file could not be found. Please make sure there is a file called " +
"lang.toml in the directory config or disable this feature.")
quit(-1)
signal.signal(signal.SIGINT, Bot.signal_handler)
# Prepare empty stubs
self._on_startup = None
# Create access level dictionary
self.access_checker = dict()
# Config Answer class
Answer._load_defaults()
# Load database
if _config_value('general', 'persistent_storage', default=False):
name = _config_value('general', 'storage_file', default="db.json")
args = _config_value('general', 'storage_args', default=" ").split(" ")
_Session.database = self._initialize_persistent_storage(name, *args)
else:
_Session.database = None
# Initialize bot
self._create_bot()
logger.info("Bot started")
def listen(self) -> None:
"""
Activates the bot by running it in a never ending asynchronous loop
"""
# Creates an event loop
global loop
loop = asyncio.get_event_loop()
# Changes its task factory to use the async context provided by aiotask_context
loop.set_task_factory(_context.copying_task_factory)
# Creates the forever running bot listening function as task
loop.create_task(MessageLoop(self._bot).run_forever(timeout=None))
# Create the startup as a separated task
loop.create_task(self.schedule_startup())
        # Start the event loop; it will not terminate on its own
loop.run_forever()
def _create_bot(self) -> None:
"""
Creates the bot using the telepot API
"""
self._bot = telepot.aio.DelegatorBot(_config_value('bot', 'token'), [
telepot.aio.delegate.pave_event_space()(
telepot.aio.delegate.per_chat_id(types=["private"]),
telepot.aio.delegate.create_open,
_Session,
timeout=_config_value('bot', 'timeout', default=31536000)),
])
@staticmethod
def _configure_logger() -> None:
"""
Configures the default python logging module
"""
# Deactivate loggers of imported modules
log = logging.getLogger("parse")
log.setLevel(logging.CRITICAL)
# Convert the written level into the numeric one
level = {"info": logging.INFO,
"debug": logging.DEBUG,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL
}.get(_config_value('general', 'logging', default="error").lower(), logging.WARNING)
# Configure the logger
logger.setLevel(level)
shandler = logging.StreamHandler()
fhandler = logging.FileHandler(
f"{path.dirname(path.realpath(sys.argv[0]))}/{_config_value('general', 'logfile', default='Bot.log')}")
formatter = logging.Formatter("[%(asctime)s] %(message)s", "%x %X")
shandler.setFormatter(formatter)
fhandler.setFormatter(formatter)
logger.addHandler(shandler)
logger.addHandler(fhandler)
@staticmethod
def _initialize_persistent_storage(*args):
"""
Creates the default database
:param args: The file name to be used
:return: The database connection
"""
return TinyDB(args[0])
@staticmethod
def init_storage(func: Callable):
"""
Decorator to replace the default persistent storage
:param func: The function which initializes the storage
:return: The unchanged function
"""
Bot._initialize_persistent_storage = func
return func
@staticmethod
def load_storage(func: Callable):
"""
Decorator to replace the default load method for the persistent storage
        :param func: The function which loads the user data
:return: The unchanged function
"""
        _Session.load_user_data = func
        return func
@staticmethod
def update_storage(func: Callable):
"""
Decorator to replace the default update method for the persistent storage
:param func: The function which updates the user data
:return: The unchanged function
"""
        _Session.update_user_data = func
        return func
@staticmethod
def answer(message: str, mode: Mode = Mode.DEFAULT) -> Callable:
"""
The wrapper for the inner decorator
:param message: The message to react upon
:param mode: The mode by which to interpret the given string
:return: The decorator itself
"""
def decorator(func: Callable) -> Callable:
"""
Adds the given method to the known routes
:param func: The function to be called
:return: The function unchanged
"""
# Add the function keyed by the given message
if mode == Mode.REGEX:
_Session.regex_routes[message] = func
            elif mode == Mode.PARSE:
_Session.parse_routes[message] = func
else:
_Session.simple_routes[message] = func
return func
# Return the decorator
return decorator
@staticmethod
def default_answer(func: Callable) -> Callable:
"""
A decorator for the function to be called if no other handler matches
:param func: The function to be registered
:return: The unchanged function
"""
# Remember the function
_Session.default_answer = func
return func
@staticmethod
def default_sticker_answer(func: Callable) -> Callable:
"""
A decorator for the function to be called if no other handler matches
:param func: The function to be registered
:return: The unchanged function
"""
# Remember the function
_Session.default_sticker_answer = func
return func
async def schedule_startup(self):
"""
If defined, executes the startup generator and processes the yielded answers
"""
class Dummy:
pass
dummy = Dummy()
dummy.user_id = None
dummy.bot = self._bot
if self._on_startup is None:
return
gen = self._on_startup()
if isinstance(gen, types.AsyncGeneratorType):
async for answer in gen:
# answer.language_feature = False
await answer._send(dummy)
def on_startup(self, func: types.CoroutineType):
"""
A decorator for a function to be awaited on the program's startup
:param func:
"""
# Remember the function
self._on_startup = func
@classmethod
def on_termination(cls, func):
"""
A decorator for a function to be called on the program's termination
:param func:
"""
cls._on_termination = func
@classmethod
def on_message_overflow(cls, func):
"""
A decorator for a function to be called when a message exceeds the maximal length
:param func:
"""
cls._on_message_overflow = func
@staticmethod
def _on_message_overflow(answer):
"""
:param answer: The answer which exceeded the maximal length
:return: A tuple with a new message, media type, and media
"""
with open("Temp" + str(hash(answer)) + ".txt", "w") as f:
f.write(answer.msg + "\n")
# Schedule removal of the temp file after 5 seconds
if platform.system() == "Windows":
system('start /B cmd /C "sleep 5 && del Temp' + str(hash(answer)) + '.txt"')
else:
system('bash -c "sleep 5; rm Temp' + str(hash(answer)) + '.txt" &')
return "", Media.DOCUMENT, "Temp" + str(hash(answer)) + ".txt"
@staticmethod
def signal_handler(sig, frame):
"""
        A signal handler to catch a termination via Ctrl-C
"""
Bot._on_termination()
logger.info("Bot shuts down")
quit(0)
@staticmethod
def before_processing(func: Callable):
"""
        A decorator for a function which shall be called before each message is processed
:param func:
"""
Bot._before_function = func
def check_access_level(self, level: str):
"""
The wrapper for the inner decorator
:param level: The access level that is evaluated by the decorated function
:return: The decorator
"""
def decorator(func: Callable):
"""
:param func: The function to be registered
:return: The unchanged function
"""
self.access_checker[level] = func
return func
return decorator
def access_level(self, *levels: str):
"""
The wrapper for the inner decorator
:param levels: The access levels that grant permission for the decorated function.
:return: The decorator
"""
def decorator(func: Callable):
"""
Wrapper for the decorating function
:param func: The function to be protected
:return: The decorated function
"""
async def inner(**kwargs):
"""
Checks all given access levels and calls the given function if one of them evaluated to true
:return: The message handler's usual output or None
"""
# Iterate through all given levels
for level in levels:
if self.access_checker.get(level, lambda: False)():
# If one level evaluated to True, call the function as usual
if iscoroutinefunction(func):
return await func(**kwargs)
else:
return func(**kwargs)
# If no level evaluated to True, return nothing
return None
return inner
return decorator
def ensure_parameter(self, name: str, phrase: str, choices: Collection[str] = None):
"""
The wrapper for the inner decorator
:param name: The name of the parameter to provide
:param phrase: The phrase to use when asking the user for the parameter
:param choices: The choices to show the user as callback
:return: The decorator
"""
def decorator(func: Callable):
"""
Wrapper for the decorating function
:param func: The function to be protected
:return: The decorated function
"""
async def inner(**kwargs):
"""
                Checks if the requested parameter exists and asks the user to provide it if it is missing
:return: The message handler's usual output
"""
# Check if the parameter exists
if name not in kwargs:
# If not, ask the user for it
temp = (yield Answer(phrase, choices=choices))
kwargs[name] = temp
                # The parameter is now present; call the function as usual
if iscoroutinefunction(func):
yield await func(**kwargs)
else:
yield func(**kwargs)
return inner
return decorator
@staticmethod
def _before_function():
return True
class Answer(object):
"""
An object to describe the message behavior
"""
media_commands = {
'sticker': Media.STICKER,
'voice': Media.VOICE,
'audio': Media.AUDIO,
'photo': Media.PHOTO,
'video': Media.VIDEO,
'document': Media.DOCUMENT,
}
def __init__(self, msg: str = None,
*format_content: Any,
choices: Collection = None,
callback: Callable = None,
keyboard: Collection = None,
media_type: Media = None,
media: str = None,
caption: str = None,
receiver: Union[str, int, User] = None,
edit_id: int = None):
"""
Initializes the answer object
:param msg: The message to be sent, this can be a language key or a command for a media type
:param format_content: If the message is a language key, the format arguments might be supplied here
        :param choices: The choices to be presented to the user as a query, either as a Collection of strings, which
        will be aligned automatically, or as a Collection of Collections of strings to control the alignment. If this
        argument is not None, the answer is treated as a query
:param callback: The function to be called with the next incoming message by this user. The message will be
propagated as parameter.
:param keyboard: A keyboard to be sent, either as Collection of strings, which will
automatically be aligned or as a Collection of Collection of strings to control the alignment.
:param media_type: The media type of this answer. Can be used instead of the media commands.
:param media: The path to the media to be sent. Can be used instead of the media commands.
:param caption: The caption to be sent. Can be used instead of the media commands.
        :param receiver: The user ID or a user object of the user who should receive this answer. Will default to the
user who sent the triggering message.
:param edit_id: The ID of the message whose text shall be updated.
"""
self._msg = msg
self.receiver = receiver
self.format_content = format_content
self.choices = choices
self.callback = callback
self.keyboard = keyboard
self.media_type: Media = media_type
self.media = media
self.caption = caption
self.edit_id = edit_id
async def _send(self, session) -> Dict:
"""
Sends this instance of answer to the user
:param session: The user's instance of _Session
        :return: The sent message as dictionary
"""
# Load the recipient's id
if self.receiver is None:
ID = session.user_id
else:
ID = self.receiver
self.mark_as_answer = False
if isinstance(ID, User):
ID = ID.id
sender = session.bot
msg = self.msg
kwargs = self._get_config()
        # Catch a message text that is too long
if self.media_type == Media.TEXT and len(msg) > 4096:
msg, self.media_type, self.media = Bot._on_message_overflow(self)
# Check for a request for editing
if self.edit_id is not None:
return await sender.editMessageText((ID, self.edit_id), msg,
**{key: kwargs[key] for key in kwargs if key in ("parse_mode",
"disable_web_page_preview",
"reply_markup")}
)
# Call the correct method for sending the desired media type and filter the relevant kwargs
if self.media_type == Media.TEXT:
return await sender.sendMessage(ID, msg,
**{key: kwargs[key] for key in kwargs if key in ("parse_mode",
"disable_web_page_preview",
"disable_notification",
"reply_to_message_id",
"reply_markup")})
elif self.media_type == Media.STICKER:
return await sender.sendSticker(ID, self.media,
**{key: kwargs[key] for key in kwargs if key in ('disable_notification',
'reply_to_message_id',
'reply_markup')})
elif self.media_type == Media.VOICE:
return await sender.sendVoice(ID, open(self.media, "rb"),
**{key: kwargs[key] for key in kwargs if key in ("caption",
"parse_mode",
"duration",
"disable_notification",
"reply_to_message_id",
"reply_markup")})
elif self.media_type == Media.AUDIO:
return await sender.sendAudio(ID, open(self.media, "rb"),
**{key: kwargs[key] for key in kwargs if key in ("caption",
"parse_mode",
"duration",
"performer",
"title",
"disable_notification",
"reply_to_message_id",
"reply_markup")})
elif self.media_type == Media.PHOTO:
return await sender.sendPhoto(ID, open(self.media, "rb"),
**{key: kwargs[key] for key in kwargs if key in ("caption",
"parse_mode",
"disable_notification",
"reply_to_message_id",
"reply_markup")})
elif self.media_type == Media.VIDEO:
return await sender.sendVideo(ID, open(self.media, "rb"),
**{key: kwargs[key] for key in kwargs if key in ("duration",
"width",
"height",
"caption",
"parse_mode",
"supports_streaming",
"disable_notification",
"reply_to_message_id",
"reply_markup")})
elif self.media_type == Media.DOCUMENT:
return await sender.sendDocument(ID, open(self.media, "rb"),
**{key: kwargs[key] for key in kwargs if key in ("caption",
"parse_mode",
"disable_notification",
"reply_to_message_id",
"reply_markup")})
def _apply_language(self) -> str:
"""
        Uses the given key and the formatting arguments to answer the user in the appropriate language
        :return: The formatted text
        """
        # The language code should be something like de, but could also be de_DE or missing entirely
usr = _context.get('user')
lang_code = usr.language_code.split('_')[0].lower() if usr is not None else "en"
try:
# Try to load the string with the given language code
answer: str = _Session.language[lang_code][self._msg]
except KeyError:
# Try to load the answer string in the default segment
try:
answer: str = _Session.language['default'][self._msg]
# Catch the key error which might be thrown
except KeyError as e:
# In strict mode, raise the error again, which will terminate the application
if self.strict_mode:
logger.critical('Language key "{}" not found!'.format(self._msg))
raise e
# In non-strict mode just send the user the key as answer
else:
return self._msg
# Apply formatting
if self.format_content is not None and len(self.format_content) > 0:
answer = answer.format(*self.format_content)
# Write back
return answer
def is_query(self) -> bool:
"""
Determines if the answer contains/is a query
:return: A boolean answering the call
"""
return self.choices is not None
@property
def msg(self) -> str:
"""
Returns either the message directly or the formatted one
:return: The final message to be sent
"""
# Retrieve message
if self.language_feature:
msg = self._apply_language()
else:
msg = self._msg
# If unset, determine media type
if self.media_type is None:
# Try to detect a relevant command
command = ""
if ":" in msg:
command, payload = msg.split(":", 1)
if command in ("sticker", "audio", "voice", "document", "photo", "video"):
if ";" in payload:
self.media, self.caption = payload.split(";", 1)
else:
self.media = payload
msg = None
self._msg = msg
self.media_type = self.media_commands.get(command, Media.TEXT)
return msg
def _get_config(self) -> Dict[str, Any]:
"""
Gets the kwargs for the sending methods.
:return: kwargs for the sending of the answer
"""
if self.choices is not None:
# In the case of 1-dimensional array
# align the options in pairs of 2
if isinstance(self.choices[0], (str, tuple)):
self.choices = [[y for y in self.choices[x * 2:(x + 1) * 2]] for x in
range(int(math.ceil(len(self.choices) / 2)))]
# Prepare button array
buttons = []
# Loop over all rows
for row in self.choices:
r = []
# Loop over each entry
for text in row:
# Append the text as a new button
if isinstance(text, str):
r.append(InlineKeyboardButton(
text=text, callback_data=text))
else:
r.append(InlineKeyboardButton(
text=text[0], callback_data=text[1]))
# Append the button row to the list
buttons.append(r)
# Assemble keyboard
keyboard = InlineKeyboardMarkup(inline_keyboard=buttons)
elif self.keyboard is not None:
            # For anything except a collection, any previously sent keyboard is removed
            if not isinstance(self.keyboard, collections.abc.Iterable):
keyboard = ReplyKeyboardRemove()
else:
# In the case of 1-dimensional array
# align the options in pairs of 2
if isinstance(self.keyboard[0], str):
self.keyboard = [[y for y in self.keyboard[x * 2:(x + 1) * 2]] for x in
range(int(math.ceil(len(self.keyboard) / 2)))]
# Prepare button array
buttons = []
# Loop over all rows
for row in self.keyboard:
r = []
# Loop over each entry
for text in row:
# Append the text as a new button
r.append(KeyboardButton(
text=text))
# Append the button row to the list
buttons.append(r)
# Assemble keyboard
keyboard = ReplyKeyboardMarkup(keyboard=buttons, one_time_keyboard=True)
else:
keyboard = None
return {
'parse_mode': self.markup,
'reply_to_message_id': _context.get('init_message').id if self.mark_as_answer and _context.get(
'message') is not None else None,
'disable_web_page_preview': self.disable_web_preview,
'disable_notification': self.disable_notification,
'reply_markup': keyboard,
'caption': self.caption
}
@classmethod
def _load_defaults(cls) -> None:
"""
Load default values from config
"""
cls.mark_as_answer = _config_value('bot', 'mark_as_answer', default=False)
cls.markup = _config_value('bot', 'markup', default=None)
cls.language_feature = _config_value('bot', 'language_feature', default=False)
cls.strict_mode = _config_value('bot', 'strict_mode', default=False)
cls.disable_web_preview = _config_value('bot', 'disable_web_preview', default=False)
cls.disable_notification = _config_value('bot', 'disable_notification', default=False)
class _Session(telepot.aio.helper.UserHandler):
"""
    The underlying framework telepot spawns an instance of this class for every conversation it encounters.
It will be responsible for directing the bot's reactions
"""
# The routing dictionaries
simple_routes: Dict[str, Callable] = dict()
parse_routes: ParsingDict = ParsingDict()
regex_routes: RegExDict = RegExDict()
# Language files
language = None
def __init__(self, *args, **kwargs):
"""
Initialize the session, called by the underlying framework telepot
:param args: Used by telepot
:param kwargs: Used by telepot
"""
        # Call the superclass constructor, allowing callback queries to be processed
super(_Session, self).__init__(include_callback_query=True, *args, **kwargs)
        # Extract the user from the default arguments
self.user = User(args[0][1]['from'])
# Create dictionary to use as persistent storage
# Load data from persistent storage
if _Session.database is not None:
self.storage = _Session.load_user_data(self.user_id)
else:
self.storage = dict()
self.callback = None
self.query_callback = {}
self.query_id = None
self.last_sent = None
self.gen = None
self.gen_is_async = None
        # Prepare a deque to store sent messages' IDs
_context.set("history", deque(maxlen=_config_value("bot", "max_history_entries", default=10)))
logger.info(
"User {} connected".format(self.user))
@staticmethod
def load_user_data(user):
"""
:param user:
:return:
"""
storage = _Session.database.search(Query().user == user)
if len(storage) == 0:
_Session.database.insert({"user": user, "storage": {}})
return dict()
else:
return storage[0]["storage"]
@staticmethod
def update_user_data(user, storage):
"""
:param user:
:param storage:
:return:
"""
_Session.database.update({"storage": storage}, Query().user == user)
def is_allowed(self):
"""
        Tests whether the current session's user is whitelisted
:return: If the user is allowed
"""
ids = _config_value("general", "allowed_ids")
# If no IDs are defined, the user is allowed
if ids is None:
return True
else:
return self.user_id in ids
async def on_close(self, timeout: int) -> None:
"""
The function which will be called by telepot when the connection times out. Unused.
:param timeout: The length of the exceeded timeout
"""
logger.info("User {} timed out".format(self.user))
pass
async def on_callback_query(self, query: Dict) -> None:
"""
The function which will be called by telepot if the incoming message is a callback query
"""
# Acknowledge the received query
# (The waiting circle in the user's application will disappear)
await self.bot.answerCallbackQuery(query['id'])
# Replace the query to prevent multiple activations
if _config_value('query', 'replace_query', default=True):
lastMessage: Answer = self.last_sent[0]
choices = lastMessage.choices
# Find the right replacement text
# This is either directly the received answer or the first element of the choice tuple
replacement = query['data'] if not isinstance(choices[0][0], tuple) or len(choices[0][0]) == 1 else next(
([x[0] for x in row if x[1] == query['data']] for row in choices), None)[0]
# Edit the message
await self.bot.editMessageText((self.user.id, query['message']['message_id']),
# The message and chat ids are inquired in this way to prevent an error when
# the user clicks on old queries
text=("{}\n<b>{}</b>" if lastMessage.markup == "HTML" else "{}\n**{}**")
.format(lastMessage.msg, replacement),
parse_mode=lastMessage.markup)
# Look for a matching callback and execute it
answer = None
func = self.query_callback.pop(query['message']['message_id'], None)
if func is not None:
if iscoroutinefunction(func):
answer = await func(query['data'])
else:
answer = func(query['data'])
elif self.gen is not None:
await self.handle_generator(msg=query['data'])
# Process answer
if answer is not None:
await self.prepare_answer(answer, log="")
async def on_chat_message(self, msg: dict) -> None:
"""
The function which will be called by telepot
:param msg: The received message as dictionary
"""
if not self.is_allowed():
return
        # Tests whether it is a normal message or something special
if 'text' in msg:
await self.handle_text_message(msg)
elif 'sticker' in msg:
await self.handle_sticker(msg)
async def handle_text_message(self, msg: dict) -> None:
"""
Processes a text message by routing it to the registered handlers and applying formatting
:param msg: The received message as dictionary
"""
text = msg['text']
log = f'Message by {self.user}: "{text}"'
# Prepare the context
_context.set('user', self.user)
_context.set('message', Message(msg))
_context.set('_<[storage]>_', self.storage)
        # If no generator is currently active, additionally save this message as the initial one
        # This is useful when the starting message is needed inside a generator
if self.gen is None:
_context.set("init_message", Message(msg))
# Calls the preprocessing function
if not Bot._before_function():
return
args: Tuple = ()
kwargs: Dict = {}
if text == _config_value('bot', 'cancel_command', default="/cancel"):
self.gen = None
self.callback = None
        # If a generator is active, hand it the message and return if it did not stop
if self.gen is not None:
            # Call the generator and abort if it handled the message
if await self.handle_generator(msg=text):
return
# If a callback is defined and the text does not match the defined cancel command,
# the callback function is called
if self.callback is not None:
func = self.callback
self.callback = None
            args = (text,)
# Check, if the message is covered by one of the known simple routes
elif text in _Session.simple_routes:
func = _Session.simple_routes[text]
# Check, if the message is covered by one of the known parse routes
elif text in _Session.parse_routes:
func, matching = _Session.parse_routes[text]
kwargs = matching.named
# Check, if the message is covered by one of the known regex routes
elif text in _Session.regex_routes:
func, matching = _Session.regex_routes[text]
kwargs = matching.groupdict()
# After everything else has not matched, call the default handler
else:
func = _Session.default_answer
# Call the matching function to process the message and catch any exceptions
try:
# The user of the framework can choose freely between synchronous and asynchronous programming
# So the program decides upon the signature how to call the function
if iscoroutinefunction(func):
answer = await func(*args, **kwargs)
else:
answer = func(*args, **kwargs)
except Exception as e:
            # Depending on the exception type, the specific message is at a different index
if isinstance(e, OSError):
msg = e.args[1]
else:
msg = e.args[0]
err = traceback.extract_tb(sys.exc_info()[2])[-1]
err = "\n\tDuring the processing occured an error\n\t\tError message: {}\n\t\tFile: {}\n\t\tFunc: {}" \
"\n\t\tLiNo: {}\n\t\tLine: {}\n\tNothing was returned to the user" \
.format(msg, err.filename.split("/")[-1], err.name, err.lineno, err.line)
logger.warning(log + err)
# Send error message, if configured
await self.handle_error()
else:
await self.prepare_answer(answer, log)
async def prepare_answer(self, answer: Union[Answer, Iterable], log: str = "") -> None:
"""
Prepares the returned object to be processed later on
:param answer: The answer to be given
:param log: A logging string
"""
# Syncs persistent storage
if _Session.database is not None:
_Session.update_user_data(self.user_id, self.storage)
try:
# None as return will result in no answer being sent
if answer is None:
logger.info(log + "\n\tNo answer was given")
return
# Handle multiple strings or answers as return
if isinstance(answer, (tuple, list)):
if isinstance(answer[0], str):
await self.handle_answer([Answer(str(answer[0]), *answer[1:])])
elif isinstance(answer[0], Answer):
await self.handle_answer(answer)
# Handle a generator
elif isgenerator(answer) or isasyncgen(answer):
self.gen = answer
self.gen_is_async = isasyncgen(answer)
await self.handle_generator(first_call=True)
# Handle a single answer
else:
await self.handle_answer([answer])
except IndexError:
            err = '\n\tAn index error occurred while preparing the answer.' \
'\n\tLikely the answer is ill-formatted:\n\t\t{}'.format(str(answer))
logger.warning(log + err)
# Send error message, if configured
await self.handle_error()
return
except FileNotFoundError as e:
err = '\n\tThe request could not be fulfilled as the file "{}" could not be found'.format(e.filename)
logger.warning(log + err)
# Send error message, if configured
await self.handle_error()
return
except TelegramError as e:
reason = e.args[0]
# Try to give a clearer error description
if reason == "Bad Request: chat not found":
reason = "The recipient has either not yet started communication with this bot or blocked it"
            err = '\n\tThe request could not be fulfilled as an API error occurred:' \
'\n\t\t{}' \
'\n\tNothing was returned to the user'.format(reason)
logger.warning(log + err)
# Send error message, if configured
await self.handle_error()
return
except Exception as e:
            # Depending on the exception type, the specific message is at a different index
if isinstance(e, OSError):
msg = e.args[1]
else:
msg = e.args[0]
err = traceback.extract_tb(sys.exc_info()[2])[-1]
err = "\n\tDuring the sending of the bot's answer occured an error\n\t\tError message: {}\n\t\tFile: {}" \
"\n\t\tFunc: {}\n\t\tLiNo: {}\n\t\tLine: {}\n\tNothing was returned to the user" \
"\n\tYou may report this bug as it either should not have occured " \
"or should have been properly caught" \
.format(msg, err.filename.split("/")[-1], err.name, err.lineno, err.line)
logger.warning(log + err)
# Send error message, if configured
await self.handle_error()
else:
if log is not None and len(log) > 0:
logger.info(log)
async def handle_sticker(self, msg: Dict) -> None:
"""
Processes a sticker either by sending a default answer or extracting the corresponding emojis
:param msg: The received message as dictionary
"""
if not self.is_allowed():
return
# Extract the emojis associated with the sticker
if _config_value('bot', 'extract_emojis', default=False):
logger.debug("Sticker by {}, will be dismantled".format(self.user))
msg['text'] = msg['sticker']['emoji']
            await self.handle_text_message(msg)
            return
        # Or call the default handler
        answer = await self.default_sticker_answer()
        await self.prepare_answer(answer)
async def handle_error(self) -> None:
"""
        Informs the connected user that an exception occurred, if enabled
"""
if _config_value('bot', 'error_reply', default=None) is not None:
await self.prepare_answer(Answer(_config_value('bot', 'error_reply')))
async def handle_answer(self, answers: Iterable[Answer]) -> None:
"""
Handle Answer objects
:param answers: Answer objects to be sent
"""
# Iterate over answers
for answer in answers:
if not isinstance(answer, Answer):
answer = Answer(str(answer))
sent = await answer._send(self)
self.last_sent = answer, sent
_context.get("history").appendleft(Message(sent))
if answer.callback is not None:
if answer.is_query():
self.query_callback[sent['message_id']] = answer.callback
else:
self.callback = answer.callback
async def handle_generator(self, msg=None, first_call=False):
"""
Performs one iteration on the generator
:param msg: The message to be sent into the generator
:param first_call: If this is the initial call to the generator
"""
        # Wrap the whole process in a try block to catch the end-of-iteration exceptions
try:
# On first call, None has to be inserted
if first_call:
if self.gen_is_async:
answer = await self.gen.asend(None)
else:
answer = self.gen.send(None)
# On the following calls, the message is inserted
else:
if self.gen_is_async:
answer = await self.gen.asend(msg)
else:
answer = self.gen.send(msg)
await self.prepare_answer(answer)
        # Return False if the generator is exhausted, True if it handled the message
except (StopIteration, StopAsyncIteration):
self.gen = None
return False
else:
return True
@staticmethod
async def default_answer() -> Union[str, Answer, Iterable[str], None]:
"""
Sets the default answer function to do nothing if not overwritten
"""
@staticmethod
async def default_sticker_answer() -> Union[str, Answer, Iterable[str], None]:
"""
Sets the default sticker answer function to do nothing if not overwritten
"""
avg_line_length: 37.466557 | max_line_length: 124 | alphanum_fraction: 0.536719
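For context, a minimal usage sketch of the framework defined in samt/samt.py above follows. It only uses names that appear in the file (`Bot`, `Answer`, `Mode`); the import path, the PARSE-mode pattern syntax, and the presence of a `config/config.toml` with a valid `[bot] token` are assumptions based on the file's own configuration loading, not something the dump confirms.

```python
# Hypothetical bot built on the samt framework above.
# Assumes config/config.toml exists next to the script and contains a [bot] section
# with a valid Telegram token, as read by _load_configuration()/_config_value().
from samt import Bot, Answer, Mode  # import path assumed from the repo layout

bot = Bot()

@bot.answer("/start")
def greet():
    # A plain string return is wrapped into an Answer by prepare_answer()
    return "Hello, I am alive."

@bot.answer("My name is {name}", mode=Mode.PARSE)
def remember(name):
    # Parse-mode routes pass the captured fields as keyword arguments
    return Answer(f"Nice to meet you, {name}")

@bot.default_answer
def fallback():
    return "I did not understand that."

if __name__ == "__main__":
    bot.listen()  # blocks and runs the asyncio event loop
```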
hexsha: b5112bb2de95eccd0d4cfce20903060b02b66346 | size: 1,485 | ext: py | lang: Python | path: Bot/Utils/pomice/exceptions.py | repo: SirEduRs/Aurora | head_hexsha: 80ac028134de93726d8a3eb8060e8a48e7009093 | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null
class PomiceException(Exception):
"""Base of all Pomice exceptions."""
class NodeException(Exception):
"""Base exception for nodes."""
class NodeCreationError(NodeException):
"""There was a problem while creating the node."""
class NodeConnectionFailure(NodeException):
"""There was a problem while connecting to the node."""
class NodeConnectionClosed(NodeException):
"""The node's connection is closed."""
pass
class NodeNotAvailable(PomiceException):
"""The node is currently unavailable."""
pass
class NoNodesAvailable(PomiceException):
"""There are no nodes currently available."""
pass
class TrackInvalidPosition(PomiceException):
"""An invalid position was chosen for a track."""
pass
class TrackLoadError(PomiceException):
"""There was an error while loading a track."""
pass
class FilterInvalidArgument(PomiceException):
"""An invalid argument was passed to a filter."""
pass
class SpotifyAlbumLoadFailed(PomiceException):
"""The pomice Spotify client was unable to load an album."""
pass
class SpotifyTrackLoadFailed(PomiceException):
"""The pomice Spotify client was unable to load a track."""
pass
class SpotifyPlaylistLoadFailed(PomiceException):
"""The pomice Spotify client was unable to load a playlist."""
pass
class InvalidSpotifyClientAuthorization(PomiceException):
"""No Spotify client authorization was provided for track searching."""
pass
avg_line_length: 22.846154 | max_line_length: 75 | alphanum_fraction: 0.726599
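The file above only declares an exception hierarchy. A short, hypothetical sketch of how calling code might discriminate between these errors follows; `connect_node` and the import path are placeholders, not part of pomice.

```python
# Hypothetical consumer of the exception hierarchy above; connect_node() is a stand-in.
from pomice.exceptions import (  # import path assumed from Bot/Utils/pomice/exceptions.py
    NodeConnectionFailure, NodeException, PomiceException,
)

def connect_node() -> None:
    raise NodeConnectionFailure("node refused the handshake")

try:
    connect_node()
except NodeConnectionFailure as exc:
    print(f"connection failed: {exc}")   # most specific subclass first
except NodeException:
    print("some other node-level problem")
except PomiceException:
    print("any other pomice error")      # note: NodeException does NOT inherit from this
```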
hexsha: 89a47ef8a2197b28ac2d3660aac1fc8b18407385 | size: 379 | ext: py | lang: Python | path: test_subprocess.py | repo: roopeshhv/JDOT | head_hexsha: a680d6667a56f6344ee5f87810bcebba5324849d | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null
import paramiko
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('10.4.96.74', username='vivek', password='nutanix/4u')
stdin, stdout, stderr = client.exec_command('cd /home/nutanix; wget http://10.4.8.60/acro_images/DISKs/cirros-0.3.4-x86_64-disk.img')
#for line in stdout:
# print line.strip('\n')
client.close()
avg_line_length: 29.153846 | max_line_length: 133 | alphanum_fraction: 0.744063
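The script above never reads the command's output (the print loop is commented out and uses Python 2 syntax). A Python 3 sketch of the same paramiko flow with the output and exit status read back might look like this; host, credentials, and the command are placeholders.

```python
# Sketch of the same paramiko flow with the output read back (placeholder host/credentials).
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("host.example", username="user", password="secret")

stdin, stdout, stderr = client.exec_command("ls -l /home")
for line in stdout:                                  # stdout is a file-like ChannelFile
    print(line.rstrip("\n"))
exit_status = stdout.channel.recv_exit_status()      # block until the command finishes
print("exit status:", exit_status)
client.close()
```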
hexsha: ca63eb2165f43946065de42c27e5f0a22ab9d4fe | size: 2,540 | ext: py | lang: Python | path: app/players/forms.py | repo: rookiebulls/scala | head_hexsha: 504efd5187b8f15a54086590e3e5572d9eda8f16 | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null
from flask.ext.wtf import Form
from wtforms import StringField, TextAreaField, SubmitField, PasswordField
from wtforms.fields.html5 import DateField
from wtforms.validators import Optional, Length, Required, URL, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User, ContentManager
class ProfileForm(Form):
email = StringField('Email', validators=[Required(), Length(1,64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), \
        Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Username must have only letters, \
numbers or underscore')])
password = PasswordField('Password', validators=[Required(), \
EqualTo('password1', message='Password must match.')])
password1 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('New')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username has already existed.')
class EditForm(Form):
email = StringField('Email', validators=[Required(), Length(1,64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), \
        Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Username must have only letters, \
numbers or underscore')])
password = PasswordField('Password', validators=[EqualTo('password1', message='Password must match.')])
password1 = PasswordField('Confirm password')
submit = SubmitField('Edit')
class ContentmanagerForm(Form):
ip_address = StringField('ServerAddress', validators=[Required()])
username = StringField('LoginName', validators=[Required()])
password = PasswordField('LoginPassword', validators=[Required(), EqualTo('password1', message='Password must match.')])
password1 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('New')
def validate_ip_address(self, field):
if ContentManager.query.filter_by(ip_address=field.data).first():
raise ValidationError('Content manager has already existed.')
class CMEditForm(Form):
    ip_address = StringField('ServerAddress', validators=[Required()])
username = StringField('Username', validators=[Required()])
password = PasswordField('Password', validators=[EqualTo('password1', message='Password must match.')])
password1 = PasswordField('Confirm password')
submit = SubmitField('Edit')
avg_line_length: 47.924528 | max_line_length: 124 | alphanum_fraction: 0.68189
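The forms above are plain Flask-WTF form classes. A hypothetical view that consumes ProfileForm could look like the sketch below; the blueprint name, route, and template are assumptions, not part of the original app.

```python
# Hypothetical Flask view using ProfileForm from the module above.
from flask import Blueprint, redirect, render_template, url_for
from .forms import ProfileForm  # relative import assumed from app/players/

players = Blueprint("players", __name__)

@players.route("/players/new", methods=["GET", "POST"])
def new_player():
    form = ProfileForm()
    if form.validate_on_submit():        # runs the Required/Email/Regexp/EqualTo validators
        # form.email.data, form.username.data and form.password.data are now validated
        return redirect(url_for("players.new_player"))
    return render_template("players/new.html", form=form)
```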
hexsha: 580925c386c5ab5f61dfbdac476ba09dd037ff47 | size: 2,616 | ext: py | lang: Python | path: DataWranglingMongoDB/Lesson2/find_author_data.py | repo: napjon/moocs_solution | head_hexsha: 5c96f43f6cb2ae643f482580446869953a99beb6 | licenses: ["MIT"] | max_stars_count: 13 (2016-04-29T07:21:44.000Z → 2021-09-29T03:20:51.000Z) | max_issues_count: 1 (2017-02-07T07:37:20.000Z → 2017-02-19T08:37:17.000Z) | max_forks_count: 13 (2016-01-25T03:23:57.000Z → 2019-10-13T15:29:23.000Z)
#!/usr/bin/env python
# Your task here is to extract data from xml on authors of an article
# and add it to a list, one item for an author.
# See the provided data structure for the expected format.
# The tags for first name, surname and email should map directly
# to the dictionary keys
import xml.etree.ElementTree as ET
article_file = "exampleResearchArticle.xml"
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def get_authors(root):
authors = []
for author in root.findall('./fm/bibl/aug/au'):
data = {
"fnm": None,
"snm": None,
"email": None
}
# YOUR CODE HERE
for key in data:
data[key] = author.find(key).text
data['insr'] = [e.attrib['iid'] for e in author.findall('insr')]
authors.append(data)
return authors
def test():
    #without insr
solution = [{'fnm': 'Omer', 'snm': 'Mei-Dan', 'email': 'omer@extremegate.com'}, {'fnm': 'Mike', 'snm': 'Carmont', 'email': 'mcarmont@hotmail.com'}, {'fnm': 'Lior', 'snm': 'Laver', 'email': 'laver17@gmail.com'}, {'fnm': 'Meir', 'snm': 'Nyska', 'email': 'nyska@internet-zahav.net'}, {'fnm': 'Hagay', 'snm': 'Kammar', 'email': 'kammarh@gmail.com'}, {'fnm': 'Gideon', 'snm': 'Mann', 'email': 'gideon.mann.md@gmail.com'}, {'fnm': 'Barnaby', 'snm': 'Clarck', 'email': 'barns.nz@gmail.com'}, {'fnm': 'Eugene', 'snm': 'Kots', 'email': 'eukots@gmail.com'}]
root = get_root(article_file)
data = get_authors(root)
assert data[0] == solution[0]
assert data[1]["fnm"] == solution[1]["fnm"]
#with insr
solution = [{'insr': ['I1'], 'fnm': 'Omer', 'snm': 'Mei-Dan', 'email': 'omer@extremegate.com'},
{'insr': ['I2'], 'fnm': 'Mike', 'snm': 'Carmont', 'email': 'mcarmont@hotmail.com'},
{'insr': ['I3', 'I4'], 'fnm': 'Lior', 'snm': 'Laver', 'email': 'laver17@gmail.com'},
{'insr': ['I3'], 'fnm': 'Meir', 'snm': 'Nyska', 'email': 'nyska@internet-zahav.net'},
{'insr': ['I8'], 'fnm': 'Hagay', 'snm': 'Kammar', 'email': 'kammarh@gmail.com'},
{'insr': ['I3', 'I5'], 'fnm': 'Gideon', 'snm': 'Mann', 'email': 'gideon.mann.md@gmail.com'},
{'insr': ['I6'], 'fnm': 'Barnaby', 'snm': 'Clarck', 'email': 'barns.nz@gmail.com'},
{'insr': ['I7'], 'fnm': 'Eugene', 'snm': 'Kots', 'email': 'eukots@gmail.com'}]
root = get_root(article_file)
data = get_authors(root)
assert data[0] == solution[0]
assert data[1]["insr"] == solution[1]["insr"]
test()
avg_line_length: 40.246154 | max_line_length: 551 | alphanum_fraction: 0.546636
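The parser above expects a specific element layout (`./fm/bibl/aug/au` with `fnm`/`snm`/`email` children and `insr` elements carrying an `iid` attribute). A hand-written toy document with that shape, run through the same XPath, illustrates what get_authors() extracts; the XML content here is invented for illustration and is not taken from exampleResearchArticle.xml.

```python
# Toy XML with the structure the parser above expects (invented example data).
import xml.etree.ElementTree as ET

sample = """
<art>
  <fm><bibl><aug>
    <au>
      <fnm>Ada</fnm><snm>Lovelace</snm><email>ada@example.org</email>
      <insr iid="I1"/><insr iid="I2"/>
    </au>
  </aug></bibl></fm>
</art>
"""

root = ET.fromstring(sample)
for au in root.findall('./fm/bibl/aug/au'):
    print(au.find('fnm').text, au.find('snm').text,
          [insr.attrib['iid'] for insr in au.findall('insr')])
# -> Ada Lovelace ['I1', 'I2']
```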
hexsha: f02705ae1c9e695e5e33f7f5cddfb55e4ffa8e79 | size: 16,734 | ext: py | lang: Python | path: idcmanager_sdk/api/idcrack/list_v2_pb2.py | repo: easyopsapis/easyops-api-python | head_hexsha: adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | licenses: ["Apache-2.0"] | max_stars_count: 5 (2019-07-31T04:11:05.000Z → 2021-01-07T03:23:20.000Z) | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_v2.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from idcmanager_sdk.model.idcmanager import idc_pb2 as idcmanager__sdk_dot_model_dot_idcmanager_dot_idc__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_v2.proto',
package='idcrack',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\rlist_v2.proto\x12\x07idcrack\x1a)idcmanager_sdk/model/idcmanager/idc.proto\x1a\x1cgoogle/protobuf/struct.proto\"*\n\x14ListIDCRackV2Request\x12\x12\n\nidcrackIds\x18\x01 \x01(\t\"\xe3\x03\n\x15ListIDCRackV2Response\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x31\n\x04list\x18\x04 \x03(\x0b\x32#.idcrack.ListIDCRackV2Response.List\x1a\xe6\x02\n\x04List\x12:\n\x06layout\x18\x01 \x03(\x0b\x32*.idcrack.ListIDCRackV2Response.List.Layout\x12\x1c\n\x03idc\x18\x02 \x01(\x0b\x32\x0f.idcmanager.IDC\x12\x12\n\ninstanceId\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0c\n\x04\x63ode\x18\x05 \x01(\t\x12\x0e\n\x06status\x18\x06 \x01(\t\x12\x0c\n\x04type\x18\x07 \x01(\t\x12\x0c\n\x04unum\x18\x08 \x01(\x05\x12\x10\n\x08\x66reeUnum\x18\t \x01(\x05\x12\r\n\x05\x63time\x18\n \x01(\t\x12\x0f\n\x07\x63reator\x18\x0b \x01(\t\x1av\n\x06Layout\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06startU\x18\x03 \x01(\x05\x12\x11\n\toccupiedU\x18\x04 \x01(\x05\x12\'\n\x06\x64\x65vice\x18\x05 \x01(\x0b\x32\x17.google.protobuf.Struct\"~\n\x1cListIDCRackV2ResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12,\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1e.idcrack.ListIDCRackV2Responseb\x06proto3')
,
dependencies=[idcmanager__sdk_dot_model_dot_idcmanager_dot_idc__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_LISTIDCRACKV2REQUEST = _descriptor.Descriptor(
name='ListIDCRackV2Request',
full_name='idcrack.ListIDCRackV2Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='idcrackIds', full_name='idcrack.ListIDCRackV2Request.idcrackIds', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=141,
)
_LISTIDCRACKV2RESPONSE_LIST_LAYOUT = _descriptor.Descriptor(
name='Layout',
full_name='idcrack.ListIDCRackV2Response.List.Layout',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='idcrack.ListIDCRackV2Response.List.Layout.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='idcrack.ListIDCRackV2Response.List.Layout.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startU', full_name='idcrack.ListIDCRackV2Response.List.Layout.startU', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='occupiedU', full_name='idcrack.ListIDCRackV2Response.List.Layout.occupiedU', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device', full_name='idcrack.ListIDCRackV2Response.List.Layout.device', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=509,
serialized_end=627,
)
_LISTIDCRACKV2RESPONSE_LIST = _descriptor.Descriptor(
name='List',
full_name='idcrack.ListIDCRackV2Response.List',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='layout', full_name='idcrack.ListIDCRackV2Response.List.layout', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='idc', full_name='idcrack.ListIDCRackV2Response.List.idc', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='idcrack.ListIDCRackV2Response.List.instanceId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='idcrack.ListIDCRackV2Response.List.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='idcrack.ListIDCRackV2Response.List.code', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='idcrack.ListIDCRackV2Response.List.status', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='idcrack.ListIDCRackV2Response.List.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unum', full_name='idcrack.ListIDCRackV2Response.List.unum', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeUnum', full_name='idcrack.ListIDCRackV2Response.List.freeUnum', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='idcrack.ListIDCRackV2Response.List.ctime', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='idcrack.ListIDCRackV2Response.List.creator', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTIDCRACKV2RESPONSE_LIST_LAYOUT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=269,
serialized_end=627,
)
_LISTIDCRACKV2RESPONSE = _descriptor.Descriptor(
name='ListIDCRackV2Response',
full_name='idcrack.ListIDCRackV2Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='idcrack.ListIDCRackV2Response.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='idcrack.ListIDCRackV2Response.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='idcrack.ListIDCRackV2Response.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='idcrack.ListIDCRackV2Response.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTIDCRACKV2RESPONSE_LIST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=627,
)
_LISTIDCRACKV2RESPONSEWRAPPER = _descriptor.Descriptor(
name='ListIDCRackV2ResponseWrapper',
full_name='idcrack.ListIDCRackV2ResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='idcrack.ListIDCRackV2ResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='idcrack.ListIDCRackV2ResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='idcrack.ListIDCRackV2ResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='idcrack.ListIDCRackV2ResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=755,
)
_LISTIDCRACKV2RESPONSE_LIST_LAYOUT.fields_by_name['device'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_LISTIDCRACKV2RESPONSE_LIST_LAYOUT.containing_type = _LISTIDCRACKV2RESPONSE_LIST
_LISTIDCRACKV2RESPONSE_LIST.fields_by_name['layout'].message_type = _LISTIDCRACKV2RESPONSE_LIST_LAYOUT
_LISTIDCRACKV2RESPONSE_LIST.fields_by_name['idc'].message_type = idcmanager__sdk_dot_model_dot_idcmanager_dot_idc__pb2._IDC
_LISTIDCRACKV2RESPONSE_LIST.containing_type = _LISTIDCRACKV2RESPONSE
_LISTIDCRACKV2RESPONSE.fields_by_name['list'].message_type = _LISTIDCRACKV2RESPONSE_LIST
_LISTIDCRACKV2RESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTIDCRACKV2RESPONSE
DESCRIPTOR.message_types_by_name['ListIDCRackV2Request'] = _LISTIDCRACKV2REQUEST
DESCRIPTOR.message_types_by_name['ListIDCRackV2Response'] = _LISTIDCRACKV2RESPONSE
DESCRIPTOR.message_types_by_name['ListIDCRackV2ResponseWrapper'] = _LISTIDCRACKV2RESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListIDCRackV2Request = _reflection.GeneratedProtocolMessageType('ListIDCRackV2Request', (_message.Message,), {
'DESCRIPTOR' : _LISTIDCRACKV2REQUEST,
'__module__' : 'list_v2_pb2'
# @@protoc_insertion_point(class_scope:idcrack.ListIDCRackV2Request)
})
_sym_db.RegisterMessage(ListIDCRackV2Request)
ListIDCRackV2Response = _reflection.GeneratedProtocolMessageType('ListIDCRackV2Response', (_message.Message,), {
'List' : _reflection.GeneratedProtocolMessageType('List', (_message.Message,), {
'Layout' : _reflection.GeneratedProtocolMessageType('Layout', (_message.Message,), {
'DESCRIPTOR' : _LISTIDCRACKV2RESPONSE_LIST_LAYOUT,
'__module__' : 'list_v2_pb2'
# @@protoc_insertion_point(class_scope:idcrack.ListIDCRackV2Response.List.Layout)
})
,
'DESCRIPTOR' : _LISTIDCRACKV2RESPONSE_LIST,
'__module__' : 'list_v2_pb2'
# @@protoc_insertion_point(class_scope:idcrack.ListIDCRackV2Response.List)
})
,
'DESCRIPTOR' : _LISTIDCRACKV2RESPONSE,
'__module__' : 'list_v2_pb2'
# @@protoc_insertion_point(class_scope:idcrack.ListIDCRackV2Response)
})
_sym_db.RegisterMessage(ListIDCRackV2Response)
_sym_db.RegisterMessage(ListIDCRackV2Response.List)
_sym_db.RegisterMessage(ListIDCRackV2Response.List.Layout)
ListIDCRackV2ResponseWrapper = _reflection.GeneratedProtocolMessageType('ListIDCRackV2ResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTIDCRACKV2RESPONSEWRAPPER,
'__module__' : 'list_v2_pb2'
# @@protoc_insertion_point(class_scope:idcrack.ListIDCRackV2ResponseWrapper)
})
_sym_db.RegisterMessage(ListIDCRackV2ResponseWrapper)
# @@protoc_insertion_point(module_scope)
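# Illustrative usage of the generated messages (a hedged sketch, not part of the
# protoc output; it assumes this module and its imported *_pb2 dependencies load):
if __name__ == '__main__':
    _wrapper = ListIDCRackV2ResponseWrapper(code=0, codeExplain='ok')
    _payload = _wrapper.SerializeToString()   # encode the message to bytes
    _decoded = ListIDCRackV2ResponseWrapper()
    _decoded.ParseFromString(_payload)        # decode the bytes back into a message
    print(_decoded.codeExplain)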
| 44.624
| 1,401
| 0.752002
|
17ee0033b4d5c79db1da60df3d70fbab11daa5aa
| 6,764
|
py
|
Python
|
kubernetes/client/models/v1beta1_replica_set_list.py
|
woqer/python
|
3a6fe8231cefe1fa39a0a69d4b2f33044ab32745
|
[
"Apache-2.0"
] | 1
|
2019-07-12T05:38:06.000Z
|
2019-07-12T05:38:06.000Z
|
kubernetes/client/models/v1beta1_replica_set_list.py
|
woqer/python
|
3a6fe8231cefe1fa39a0a69d4b2f33044ab32745
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1beta1_replica_set_list.py
|
woqer/python
|
3a6fe8231cefe1fa39a0a69d4b2f33044ab32745
|
[
"Apache-2.0"
] | 1
|
2021-05-18T12:25:56.000Z
|
2021-05-18T12:25:56.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1ReplicaSetList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1beta1ReplicaSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1beta1ReplicaSetList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1beta1ReplicaSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1ReplicaSetList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1ReplicaSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1ReplicaSetList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1beta1ReplicaSetList.
List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
:return: The items of this V1beta1ReplicaSetList.
:rtype: list[V1beta1ReplicaSet]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1beta1ReplicaSetList.
List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
:param items: The items of this V1beta1ReplicaSetList.
:type: list[V1beta1ReplicaSet]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1beta1ReplicaSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1ReplicaSetList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1ReplicaSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1ReplicaSetList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1ReplicaSetList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The metadata of this V1beta1ReplicaSetList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1ReplicaSetList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1beta1ReplicaSetList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1ReplicaSetList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
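# Illustrative usage (a minimal sketch, not part of the generated client; the field
# values are placeholders and only the module's own imports are assumed):
if __name__ == '__main__':
    _example = V1beta1ReplicaSetList(api_version='extensions/v1beta1', items=[],
                                     kind='ReplicaSetList')
    print(_example.to_dict())   # serialize the model back into a plain dict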
| 31.90566
| 281
| 0.616795
|
89084d000194aef0ba40dc77f22b5a290cfc8142
| 3,770
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/web/v20200601/list_web_app_function_keys.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/web/v20200601/list_web_app_function_keys.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/web/v20200601/list_web_app_function_keys.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListWebAppFunctionKeysResult',
'AwaitableListWebAppFunctionKeysResult',
'list_web_app_function_keys',
]
@pulumi.output_type
class ListWebAppFunctionKeysResult:
"""
String dictionary resource.
"""
def __init__(__self__, id=None, kind=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, str]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebAppFunctionKeysResult(ListWebAppFunctionKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppFunctionKeysResult(
id=self.id,
kind=self.kind,
name=self.name,
properties=self.properties,
type=self.type)
def list_web_app_function_keys(function_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppFunctionKeysResult:
"""
String dictionary resource.
:param str function_name: Function name.
:param str name: Site name.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['functionName'] = function_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20200601:listWebAppFunctionKeys', __args__, opts=opts, typ=ListWebAppFunctionKeysResult).value
return AwaitableListWebAppFunctionKeysResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
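# Illustrative wrapper (a hedged sketch; the resource names below are hypothetical
# placeholders, and the invoke only works inside a Pulumi program with Azure
# credentials configured):
def _example_list_function_keys() -> AwaitableListWebAppFunctionKeysResult:
    return list_web_app_function_keys(
        function_name='my-function',
        name='my-site',
        resource_group_name='my-resource-group')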
| 31.157025
| 150
| 0.622281
|
80f892e1422771bb072594b8f7ee11549b23d08c
| 10,734
|
py
|
Python
|
qa/rpc-tests/maxuploadtarget.py
|
CryptoRane/Rane
|
135c52098deda505dc240f468e2b6b2a35622610
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/maxuploadtarget.py
|
CryptoRane/Rane
|
135c52098deda505dc240f468e2b6b2a35622610
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/maxuploadtarget.py
|
CryptoRane/Rane
|
135c52098deda505dc240f468e2b6b2a35622610
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("RAND", "raned"),
help="raned binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them together come close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourself after accounting for a fee,
            # and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the #
            # of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
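        # Worked numbers (assuming MAX_BLOCK_SIZE is 1,000,000 bytes and an old block
        # of roughly 0.95 MB): 200*1024*1024 = 209,715,200 bytes/day minus the
        # 144 * 1,000,000 = 144,000,000 byte new-block buffer leaves about 65.7 MB,
        # i.e. roughly 69-70 getdata requests before the limit trips.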
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
| 40.353383
| 140
| 0.654742
|
0980de09ddae7297291f685c234ffeb5c31fdc6a
| 6,630
|
py
|
Python
|
pro3/query_system/data.py
|
Jack-Lio/-Information-retrieval-2019
|
5c5eb675dbc6ff6977a759da07326b9629ff7f3f
|
[
"MIT"
] | null | null | null |
pro3/query_system/data.py
|
Jack-Lio/-Information-retrieval-2019
|
5c5eb675dbc6ff6977a759da07326b9629ff7f3f
|
[
"MIT"
] | null | null | null |
pro3/query_system/data.py
|
Jack-Lio/-Information-retrieval-2019
|
5c5eb675dbc6ff6977a759da07326b9629ff7f3f
|
[
"MIT"
] | null | null | null |
##############################
# get data from source files
# filename: data.py
# author: liwei
# StuID: 1711350
# date: 2019.12.5
##############################
import os
from shutil import copyfile
from sys import exit
import sys
import numpy as np
# Colleges (faculty name -> directory code)
xy_dict = {
"南开大学商学院":"bs",
"南开大学计算机学院":"cc",
"南开大学经济学院":"ec",
"南开大学历史学院":"ht",
"南开大学法学院":"law",
"南开大学文学院":"lt",
"南开大学哲学院":"phi",
}
# Resource document directory
file_path = '..\\docs'
# Get the snapshot file path of a person, relative to the route address used at query time
def get_html(xueyuan,name):
if os.path.exists("/snapshots/%s/%s.html"%(xy_dict[xueyuan],name)):
return "/snapshots/%s/%s.html"%(xy_dict[xueyuan],name)
else:
return "/snapshots/%s/%s.html"%(xy_dict[xueyuan],str(name).replace(" ",""))
# Get the content file path of a person
def get_content(xueyuan,name):
if os.path.exists( "../docs/%s/%s.txt"%(xy_dict[xueyuan],name)): # 锚文本存储文件夹
return "../docs/%s/%s.txt"%(xy_dict[xueyuan],name)
else:
return "../docs/%s/%s.txt"%(xy_dict[xueyuan],str(name).replace(" ",""))
# Get the path where a person's anchor text is saved
def get_mtext(xueyuan,name):
if os.path.exists("../docs/%s/m_text/%s_m.txt"%(xy_dict[xueyuan],name)): # 锚文本存储文件夹
return "../docs/%s/m_text/%s_m.txt"%(xy_dict[xueyuan],name)
else:
return "../docs/%s/m_text/%s_m.txt"%(xy_dict[xueyuan],str(name).replace(" ",""))
# Get the path where a person's photo is saved
def get_img(xueyuan,name):
    if os.path.exists("../docs/%s/imgs/%s.jpg"%(xy_dict[xueyuan],str(name).replace(" ",""))): # image folder
        return "../docs/%s/imgs/%s.jpg" % (xy_dict[xueyuan], name)
    elif os.path.exists("../docs/%s/imgs/%s.png"%(xy_dict[xueyuan],str(name).replace(" ",""))): # image folder
        return "../docs/%s/imgs/%s.png" % (xy_dict[xueyuan], name)
    elif os.path.exists("../docs/%s/imgs/%s.bmp"%(xy_dict[xueyuan],str(name).replace(" ",""))): # image folder
return "../docs/%s/imgs/%s.bmp" % (xy_dict[xueyuan], name)
else:
return "#"
# Read the contents of the index.txt files holding all teacher information
def get_teacher_info():
    info = dict()
    # Walk the root directory and build an index from the index.txt contents
for root, dirs, files in os.walk(file_path, topdown=True):
for file in files:
path_t = os.path.join(root, file)
if path_t.split('\\')[-1] != 'index.txt':
continue
print("=======>" + path_t, file)
f = open(path_t, 'r', encoding='UTF-8')
for line in f:
item_list = line.split(",")
#print(item_list)
                #assert(item_list[0]+item_list[1] not in info.keys()) # sanity check
if item_list[0] in [x.split('-')[0] for x in info.keys()]:
print("$$$$"+item_list[0]+item_list[1]) # 存在同一个人,在不同的页面出现个人主页,且内容完全一样,只要链接不同,则视同为不同的人
# 也有不同学院同名的,如果是同学院的同名情况则目前无法解决
if item_list[0]+'-'+item_list[1] in info.keys():
print("####"+item_list[0]+item_list[1] )
                    pc = info[item_list[0]+'-'+item_list[1]]["pageRefer"] # the same link is referenced twice, i.e. two anchor texts point to it, usable for link analysis
info[item_list[0] + '-' + item_list[1]]["pageRefer"]=pc+1
continue
                info[item_list[0]+'-'+item_list[1]] = { # create the dictionary entry
"name":item_list[0],
"url":item_list[1],
"xueyuan":item_list[2],
"parentUrl":item_list[3],
"pageRefer":1,
}
return info
# Copy images
def move_img():
    # Walk the root directory and copy every image into the static directory of the query Flask app
for root, dirs, files in os.walk(file_path, topdown=True):
for file in files:
path_t = os.path.join(root, file)
if path_t.split('\\')[-2] != 'imgs':
continue
print("=======>" + path_t, file)
source= path_t
target = "./static/images/%s/%s"%(path_t.split('\\')[-3],path_t.split('\\')[-1])
if not os.path.exists("./static/images/%s"%(path_t.split('\\')[-3])):
os.makedirs("./static/images/%s"%(path_t.split('\\')[-3]))
# adding exception handling
try:
copyfile(source, target)
except IOError as e:
print("Unable to copy file. %s" % e)
exit(1)
except:
print("Unexpected error:", sys.exc_info())
exit(1)
# Compute PageRank values by analysing the links of the crawled static pages; info holds all collected teacher data with the fields below:
# "name": item_list[0],
# "url": item_list[1],
# "xueyuan": item_list[2],
# "parentUrl": item_list[3],
# "pageRefer": 1,
def pagerank(info_t):
info = dict(info_t)
    url_dict = dict() # stores the URL -> index mapping
    url_pair = {} # stores parent page -> child pages link pairs
    no = 0
    # build the index table for all pages
for key in info.keys():
if info[key]["url"] not in url_dict.keys():
url_dict[info[key]["url"]] = no
no = no + 1
if info[key]["parentUrl"] not in url_dict.keys():
url_dict[info[key]["parentUrl"]] = no
no = no + 1
if info[key]["parentUrl"] not in url_pair.keys():
url_pair[info[key]["parentUrl"]] = [info[key]["url"]]
else:
url_pair[info[key]["parentUrl"]].extend([info[key]["url"]])
    # build the probability matrix of the random-walk process
    N = len(url_dict.keys()) # matrix size
    matrix = np.zeros((N,N)) # allocate the matrix
    # compute the adjacency matrix
for parenturl in url_pair.keys():
for sonurl in url_pair[parenturl]:
matrix[url_dict[parenturl]][url_dict[sonurl]] = 1
matrix[url_dict[sonurl]][url_dict[parenturl]] = 0
    # Markov chain transition matrix
for i in range(N):
count = 0
        for j in range(N): # count the number of 1s in this row
if matrix[i][j] ==1 :
count=count +1
        if count == 0 : # no 1 in this row, set every entry to 1/N
for j in range(N):
matrix[i][j] = 1.0/N
else:
for j in range(N):
                if matrix[i][j]==1: # row not all zero, replace each 1 with 1/count
matrix[i][j]= 1.0/count
alpha = 0.1
matrix1 = matrix*(1-alpha)
matrix2 = matrix1+(alpha/N)
    # set the initial state probability distribution vector
start = np.zeros((N,N))
start[0][0] = 1
cur = next = np.dot(start,matrix2)
times = 0
while(1):
times =times+1
exit_flag = True
        next = np.dot(cur,matrix2) # iterate
for i in range(N):
if next[0][i] != cur[0][i] :
exit_flag = False
if(exit_flag):
break
cur = next
scores = next[0]
print("end")
    return url_dict,scores # return the URL index mapping and the page scores
print(pagerank(get_teacher_info()))
# copy images
# move_img()
| 34.175258
| 121
| 0.511312
|
eb9ad40e2924b2837a946bfdd89aed738d293424
| 7,502
|
py
|
Python
|
spark/mod_decompress_audio.py
|
droyston/spectralize
|
572770e7358acc3ec433470659759c17453409f2
|
[
"MIT"
] | null | null | null |
spark/mod_decompress_audio.py
|
droyston/spectralize
|
572770e7358acc3ec433470659759c17453409f2
|
[
"MIT"
] | null | null | null |
spark/mod_decompress_audio.py
|
droyston/spectralize
|
572770e7358acc3ec433470659759c17453409f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 16:12:56 2020
@author: dylanroyston
"""
# import/configure packages
import numpy as np
import pandas as pd
#import pyarrow as pa
import librosa
import librosa.display
from pathlib import Path
#import Ipython.display as ipd
#import matplotlib.pyplot as plt
from pyspark.sql import *
import pyspark.sql.functions as f
from pyspark import SparkConf, SparkContext, SQLContext
import boto3
from tinytag import TinyTag as tt
#from io import BytesIO
import os
import sys
import time
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/lib")
#import config
time_seq = []
#####
# create local Spark instance (for non-cluster dev)
# sc = SparkContext('local')
# spark = SparkSession (sc)
# spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# define Spark config
def spark_conf():
conf = SparkConf().setAppName("decompress_audio_files")
sc = SparkContext(conf=conf)
spark = SparkSession.builder.getOrCreate()
return spark
spark = spark_conf()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
#####
# Function to write spark-dataframe to PSQL
def write_df_to_psql(df, tablename):
psql_user = os.environ.get('PSQL_USR')
psql_pwd = os.environ.get('PSQL_PWD')
df.write.format('jdbc').options(
url='jdbc:postgresql://10.0.0.6:5432/spectralize',
dbtable=tablename,
user=psql_user,
password=psql_pwd).mode('append').save()
#password=psql_pwd).save()
# Function to write spark-dataframe to TimescaleDB
def write_df_to_tsdb(df, tablename):
psql_user = os.environ.get('PSQL_USR')
psql_pwd = os.environ.get('PSQL_PWD')
df.write.format('jdbc').options(
url='jdbc:postgresql://10.0.0.11:5432/spectralize',
dbtable=tablename,
user=psql_user,
#password=psql_pwd).mode('append').save()
password=psql_pwd).save()
#####
# function to read audio files from S3 bucket and extract tags
def read_audio_files():
# basic initialization
time_seq.append(['start-read-audio', time.time()])
# DataFrame schema
File_Tags = Row("s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year")
spec_labels = []
for sn in range(0,128):
spec_labels.append('spec' + str(sn+1))
spec_df_labels = ['song_id','timeseries'] + spec_labels
Spec_Tags = Row(spec_df_labels)
# configure S3 access
s3_bucket = 'mdp-spectralize-pal'
number_of_files = 0
s3 = boto3.resource('s3')
bucket = s3.Bucket(s3_bucket)
number_of_files=0
file_limit=5
#local_path = './local_file.'
known_ext = [".mp3", ".wav", ".m4a"]
#read each file from S3 bucket
for obj in bucket.objects.all():
s3_key = obj.key
# extract tags from mp3 files
#if "mp3" in s3_key:
if any(ext in s3_key for ext in known_ext):
            print(['***** ' + str(number_of_files) + ' *****'])
ext = s3_key[-4:]
local_path = './localfile' + ext
number_of_files+=1
bucket.download_file(s3_key, local_path)
##### tags
tags = tt.get(local_path)
# extract tags from tinytag object
indiv_tags = (s3_key, number_of_files, tags.album, tags.albumartist, tags.artist,
tags.audio_offset, tags.bitrate, tags.channels,
tags.comment, tags.composer, tags.disc,
tags.disc_total, tags.duration, tags.filesize,
tags.genre, tags.samplerate, tags.title, tags.track,
tags.track_total, tags.year)
# convert tuple object to list
indiv_tag_list = list(indiv_tags)
indiv_tag_list = [str(i) for i in indiv_tag_list]
tag_seq=[]
tag_seq.append(indiv_tag_list)
tags_pdf = pd.DataFrame(data=tag_seq)
tag_df = spark.createDataFrame(tags_pdf, schema=File_Tags)
##### audio
# load audio file with Librosa
#y, sr = librosa.load(str(Path(local_path)), sr=None)
y, sr = librosa.load(local_path, sr=None)
# create indexing variables (song_id, timestamp)
# song_id defined as "repeat(number_of_files)"
song_num = pd.Series([number_of_files])
num_points = len(y)
song_id = song_num.repeat(num_points)
song_id = song_id.to_numpy()
# timeseries defined as "1 : length(audio_data)"
timeseries = np.arange(num_points)
timeseries = timeseries.transpose()
full_audio = {'song_id': song_id, 'timeseries': timeseries,
'intensity': y}
# create combined dataframe
audio_pdf = pd.DataFrame(data = full_audio)
audio_df = spark.createDataFrame(audio_pdf)
##### spectral
S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128, fmax=10000)
log_S = librosa.power_to_db(S, ref=np.max)
log_S = log_S.transpose()
# song_id defined as "repeat(number_of_files)"
song_num = pd.Series([number_of_files])
num_points = len(S.transpose())
song_id = song_num.repeat(num_points)
song_id = song_id.to_numpy()
# timeseries defined as "1 : length(audio_data)"
timeseries = np.arange(num_points)
timeseries = timeseries.transpose()
full_index = {'song_id': song_id, 'timeseries': timeseries}
index_pdf = pd.DataFrame(full_index)
spec_pdf = pd.DataFrame(data=log_S, columns=spec_labels)
full_spec = pd.concat([index_pdf, spec_pdf], axis=1)
spec_df = spark.createDataFrame(full_spec)
##### write dataframes to psql
write_df_to_psql(tag_df, 'clean_metadata')
write_df_to_tsdb(audio_df, 'clean_audio')
write_df_to_psql(spec_df, 'clean_spec')
# stop process when file_limit is crossed (small batches)
if (number_of_files >= file_limit):
break
#####
time_seq.append(['end read-file', time.time()])
#df_tags = spark.createDataFrame(tag_seq, schema=File_Tags)
#df_audio = spark.createDataFrame(audio_seq)
#df_spec = spark.createDataFrame(audio_seq, schema=Spec_Tags)
# Additional run to
#df_audio_data = spark.createDataFrame(file_audio_data)
#process_df(df_audio_data)
#####
if __name__ == '__main__':
time_seq.append(['start', time.time()])
read_audio_files()
| 30.620408
| 94
| 0.565049
|
1714ebc98310a294b05e0deb6af805f67d5e468c
| 223
|
py
|
Python
|
Semester 3/Code Optimization and Debugging-I/Python/Exp_03 - Lowercase to Uppercase.py
|
Killbot2614/Programming-Lab
|
ce9ccecf35deba4ee749026b4f5c3a9e6b0ac548
|
[
"MIT"
] | 13
|
2021-10-16T05:17:51.000Z
|
2022-01-12T14:53:55.000Z
|
Semester 3/Code Optimization and Debugging-I/Python/Exp_03 - Lowercase to Uppercase.py
|
Killbot2614/Code-Optimization-and-Debugging-I
|
cb3bd0fc5bfb703b29b4a4b9a751f75792ac4a8a
|
[
"MIT"
] | null | null | null |
Semester 3/Code Optimization and Debugging-I/Python/Exp_03 - Lowercase to Uppercase.py
|
Killbot2614/Code-Optimization-and-Debugging-I
|
cb3bd0fc5bfb703b29b4a4b9a751f75792ac4a8a
|
[
"MIT"
] | 5
|
2021-09-24T17:28:08.000Z
|
2021-12-16T15:47:27.000Z
|
# Python Program to convert lowercase string to uppercase string
# Get the input string
# Convert lower case to upper case
# Print the output
print(input().upper())
# Input: sathyabama
# Output: SATHYABAMA
| 18.583333
| 66
| 0.70852
|
bcc68e1ffe11f4ca3178e81ecccb6f5ccc81896e
| 11,692
|
py
|
Python
|
stochpy/__init__.py
|
bgoli/stochpy
|
ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d
|
[
"BSD-3-Clause"
] | 35
|
2016-02-29T22:56:07.000Z
|
2022-03-06T17:21:29.000Z
|
stochpy/__init__.py
|
bgoli/stochpy
|
ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d
|
[
"BSD-3-Clause"
] | 6
|
2016-10-13T12:43:54.000Z
|
2021-04-30T09:06:59.000Z
|
stochpy/__init__.py
|
bgoli/stochpy
|
ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d
|
[
"BSD-3-Clause"
] | 13
|
2016-07-07T19:49:25.000Z
|
2021-05-14T20:24:17.000Z
|
#! /usr/bin/env python
"""
StochPy - Stochastic modeling in Python (http://stochpy.sourceforge.net)
Copyright (C) 2010-2015 T.R Maarleveld, B.G. Olivier, F.J. Bruggeman all rights reserved.
Timo R. Maarleveld (tmd200@users.sourceforge.net)
Centrum Wiskunde & Informatica, Amsterdam, Netherlands
VU University, Amsterdam, Netherlands
Permission to use, modify, and distribute this software is given under the
terms of the StochPy (BSD style) license.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, print_function, absolute_import
__doc__ = """
StochPy: Stochastic Modeling in Python
=====================================
StochPy (Stochastic modeling in Python) is a flexible software tool for stochastic simulation in cell biology. It provides various stochastic
simulation algorithms, SBML support, analyses of the probability distributions of molecule copy numbers and event waiting times, analyses of stochastic time
series, and a range of additional statistical functions and plotting facilities for stochastic simulations.
Options:
--------
- Stochastic Simulations
- Variety of stochastic simulation output analysis:
--> Time Simulation
--> Distribution
--> Waiting times
--> Propensities
- Cell Division simulations
- SBML and PSC MDL input format.
StochPy can be used in an interactive Python shell:
Usage
-----
>>> import stochpy
>>> utils = stochpy.Utils()
>>> utils.doExample1()
>>> utils.doExample2()
>>> smod = stochpy.SSA() # stochastic simulation algorithm module
>>> help(smod)
>>> help(stochpy.SSA) # (some windows versions)
>>> stochpy?
>>> smod.DoStochSim()
>>> smod.PlotSpeciesTimeSeries()
>>> converter = stochpy.SBML2PSC()
>>> converter??
>>> help(stochpy.SBML2PSC)
"""
from .core2.version import __version__
import os,shutil,sys
try:
import readline
_IsReadline = True
except:
_IsReadline = False
try:
from numpy.distutils.core import setup, Extension
_IsNumPy = True
except Exception as ex:
_IsNumPy = False
print(ex)
print("StochPy requires NumPy")
print("See http://numpy.scipy.org/ for more information about NumPy")
os.sys.exit(-1)
try:
import matplotlib
_IsMPL = True
except:
_IsMPL = False
print("Warning: The Matplotlib module is not available, so plotting is not possible")
print("Info: See http://matplotlib.sourceforge.net/ for more information about Matplotlib.")
_IsPlotting = False
try:
import matplotlib.pyplot as plt
_IsPlotting = True
except Exception as er:
print(er)
def InitiateModels(directory):
"""
Build several models written in PSC MDL and SBML
Input:
- *directory* (string)
"""
from .pscmodels import Burstmodel
from .pscmodels import BirthDeath
from .pscmodels import ImmigrationDeath
from .pscmodels import DecayingDimerizing
from .pscmodels import Autoreg
from .pscmodels import CellDivision as celldivision
from .pscmodels import GeneDuplication
from .pscmodels import dsmts_001_01
from .pscmodels import dsmts_001_11
from .pscmodels import dsmts_001_19
from .pscmodels import dsmts_002_10
from .pscmodels import dsmts_003_03
from .pscmodels import dsmts_003_04
from .pscmodels import chain5
from .pscmodels import chain50
from .pscmodels import chain500
from .pscmodels import chain1500
from .pscmodels import Isomerization
from .pscmodels import Polymerase
from .pscmodels import TranscriptionIntermediate
from .pscmodels import Schlogl
from .pscmodels import SignalingTimeVaryingL
from .pscmodels import Signaling3cCD
models = {}
models['Signaling3cCD.psc'] = Signaling3cCD.model
models['SignalingTimeVaryingL.psc'] = SignalingTimeVaryingL.model
models['Schlogl.psc'] = Schlogl.model
models['Burstmodel.psc'] = Burstmodel.model
models['ImmigrationDeath.psc'] = ImmigrationDeath.model
models['BirthDeath.psc'] = BirthDeath.model
models['DecayingDimerizing.psc'] = DecayingDimerizing.model
models['Autoreg.psc'] = Autoreg.model
models['Autoreg.xml'] = Autoreg.xml_model
models['CellDivision.psc'] = celldivision.model
models['GeneDuplication.psc'] = GeneDuplication.model
models['Isomerization.psc'] = Isomerization.model
models['Polymerase.psc'] = Polymerase.model
models['TranscriptionIntermediate.psc'] = TranscriptionIntermediate.model
models['dsmts-001-01.xml.psc'] = dsmts_001_01.model
models['dsmts-001-01.xml'] = dsmts_001_01.xml_model
models['dsmts-001-11.xml.psc'] = dsmts_001_11.model
models['dsmts-001-11.xml'] = dsmts_001_11.xml_model
models['dsmts-001-19.xml.psc'] = dsmts_001_19.model
models['dsmts-001-19.xml'] = dsmts_001_19.xml_model
models['dsmts-002-10.xml.psc'] = dsmts_002_10.model
models['dsmts-002-10.xml'] = dsmts_002_10.xml_model
models['dsmts-003-03.xml.psc'] = dsmts_003_03.model
models['dsmts-003-03.xml'] = dsmts_003_03.xml_model
models['dsmts-003-04.xml.psc'] = dsmts_003_04.model
models['dsmts-003-04.xml'] = dsmts_003_04.xml_model
models['chain5.psc'] = chain5.model
models['chain50.psc'] = chain50.model
models['chain500.psc'] = chain500.model
models['chain1500.psc'] = chain1500.model
model_names = list(models)
dir_models = os.listdir(directory)
for mod_name in model_names:
if mod_name not in dir_models:
print("Info: Model {0:s} copied to {1:s}".format(mod_name ,directory) )
file_out = open(os.path.join(directory,mod_name),'w')
file_out.write(models[mod_name])
file_out.close()
output_dir = None
model_dir = None
if os.sys.platform != 'win32':
if not os.path.exists(os.path.join(os.path.expanduser('~'),'Stochpy')):
os.makedirs(os.path.join(os.path.expanduser('~'),'Stochpy'))
if not os.path.exists(os.path.join(os.path.expanduser('~'),'Stochpy', 'pscmodels')):
os.makedirs(os.path.join(os.path.expanduser('~'),'Stochpy','pscmodels'))
if not os.path.exists(os.path.join(os.path.expanduser('~'),'Stochpy', 'temp')):
os.makedirs(os.path.join(os.path.expanduser('~'),'Stochpy','temp'))
output_dir = os.path.join(os.path.expanduser('~'),'Stochpy')
model_dir = os.path.join(os.path.expanduser('~'),'Stochpy','pscmodels')
temp_dir = os.path.join(os.path.expanduser('~'),'Stochpy','temp')
InitiateModels(model_dir)
else:
if not os.path.exists(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy')):
os.makedirs(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy'))
if not os.path.exists(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy','pscmodels')):
os.makedirs(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy','pscmodels'))
if not os.path.exists(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy','temp')):
os.makedirs(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy','temp'))
output_dir = os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy',)
model_dir = os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy','pscmodels')
temp_dir = os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Stochpy','temp')
InitiateModels(model_dir)
from .modules.SBML2PSC import SBML2PSC
from .modules.StochSim import SSA
from .modules.StochPyUtils import Utils
from .modules.StochPyCellDivision import CellDivision
from .modules.StochPyDemo import Demo
from .modules import Analysis as Analysis
try:
from .modules.NucleosomeTool import NucleosomeModelBuilder
from .modules.NucleosomeTool import NucleosomeSimulator
except Exception as er:
pass # ignore
def DeletePreviousOutput(path,type):
"""
Delete output of earlier simulations
Input:
- *path* (string)
- *type* (string)
"""
for filename in os.listdir(path):
if filename.endswith(type):
filename_path = os.path.join(path,filename)
os.remove(filename_path)
def DeleteExistingData(path):
"""
Delete all existing StochKit simulation data
Input:
- *path* (string)
"""
if os.path.exists(path):
for maps in os.listdir(path):
dir2delete = os.path.join(path,maps)
shutil.rmtree(dir2delete, ignore_errors=True)
def SaveInteractiveSession(filename='interactiveSession.py',path=output_dir):
"""
Save the interactive session
Input:
- *filename*: [default = interactiveSession.py'] (string)
- *path*: (string)
"""
if not _IsReadline:
print("Error: install 'readline' first")
elif _IsReadline:
historyPath = os.path.join(path,filename)
if not os.path.exists(path):
            os.makedirs(path)
readline.write_history_file(historyPath)
file_in = open(historyPath,'r')
history_list = file_in.readlines()
n_import_statement = 0
for command in history_list:
if 'import' in command and 'stochpy' in command:
n_import_statement +=1
n=0
file_out = open(historyPath,'w')
for command in history_list:
if 'import' in command and 'stochpy' in command:
n+=1
if n==n_import_statement:
file_out.write(command)
file_out.close()
print("Info: Interactive session successfully saved at {0:s}".format(historyPath) )
print("Info: use 'ipython {0:s} to restart modeling with this interactive session".format(filename) )
DeletePreviousOutput(temp_dir,'.dat')
DeletePreviousOutput(temp_dir,'.xml')
DeletePreviousOutput(temp_dir,'.txt')
DeletePreviousOutput(temp_dir,'temp_parse_module')
DeleteExistingData(temp_dir)
#readline.clear_history()
print("""
#######################################################################
# #
# Welcome to the interactive StochPy environment #
# #
#######################################################################
# StochPy: Stochastic modeling in Python #
# http://stochpy.sourceforge.net #
# Copyright(C) T.R Maarleveld, B.G. Olivier, F.J Bruggeman 2010-2015 #
# DOI: 10.1371/journal.pone.0079345 #
# Email: tmd200@users.sourceforge.net #
# VU University, Amsterdam, Netherlands #
# Centrum Wiskunde Informatica, Amsterdam, Netherlands #
# StochPy is distributed under the BSD licence. #
#######################################################################
""")
print("Version {0:s}".format(__version__) )
print("Output Directory: {0:s}".format(output_dir) )
print("Model Directory: {0:s}".format(model_dir) )
#print("Warning: Figure freezing? Try a different matplotlib backend (stochpy.plt.switch_backend) and/or set IsInteractive to False (see user guide)")
| 40.178694
| 168
| 0.629234
|
e48d4cfd7b3e6484a3c31e160208e23335658a3a
| 2,619
|
py
|
Python
|
accuracy/visualization/plots_lib.py
|
phanhuy1502/FYP
|
600f694bc173547fba167e4885762d96e73f061e
|
[
"MIT"
] | null | null | null |
accuracy/visualization/plots_lib.py
|
phanhuy1502/FYP
|
600f694bc173547fba167e4885762d96e73f061e
|
[
"MIT"
] | null | null | null |
accuracy/visualization/plots_lib.py
|
phanhuy1502/FYP
|
600f694bc173547fba167e4885762d96e73f061e
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def parallel_coordinates(data_sets, style=None):
dims = len(data_sets[0])
x = range(dims)
fig, axes = plt.subplots(1, dims-1, sharey=False)
if style is None:
style = ['r-']*len(data_sets)
# Calculate the limits on the data
min_max_range = list()
for m in zip(*data_sets):
mn = min(m)
mx = max(m)
if mn == mx:
mn -= 0.5
mx = mn + 1.
r = float(mx - mn)
min_max_range.append((mn, mx, r))
# Normalize the data sets
norm_data_sets = list()
for ds in data_sets:
nds = [(value - min_max_range[dimension][0]) /
min_max_range[dimension][2]
for dimension,value in enumerate(ds)]
norm_data_sets.append(nds)
data_sets = norm_data_sets
# Plot the datasets on all the subplots
for i, ax in enumerate(axes):
for dsi, d in enumerate(data_sets):
ax.plot(x, d, style[dsi])
ax.set_xlim([x[i], x[i+1]])
# Set the x axis ticks
for dimension, (axx,xx) in enumerate(zip(axes, x[:-1])):
axx.xaxis.set_major_locator(ticker.FixedLocator([xx]))
ticks = len(axx.get_yticklabels())
labels = list()
step = min_max_range[dimension][2] / (ticks - 1)
mn = min_max_range[dimension][0]
        for i in range(ticks):
v = mn + i*step
labels.append('%4.2f' % v)
axx.set_yticklabels(labels)
# Move the final axis' ticks to the right-hand side
axx = plt.twinx(axes[-1])
dimension += 1
axx.xaxis.set_major_locator(ticker.FixedLocator([x[-2], x[-1]]))
ticks = len(axx.get_yticklabels())
step = min_max_range[dimension][2] / (ticks - 1)
mn = min_max_range[dimension][0]
    labels = ['%4.2f' % (mn + i*step) for i in range(ticks)]
axx.set_yticklabels(labels)
# Stack the subplots
plt.subplots_adjust(wspace=0)
return plt
if __name__ == '__main__':
import random
base = [0, 0, 5, 5, 0]
scale = [1.5, 2., 1.0, 2., 2.]
data = [[base[x] + random.uniform(0., 1.)*scale[x]
            for x in range(5)] for y in range(30)]
colors = ['r','b','g','c','m','y','k','w'] * 30
base = [3, 6, 0, 1, 3]
scale = [1.5, 2., 2.5, 2., 2.]
data.extend([[base[x] + random.uniform(0., 1.)*scale[x]
            for x in range(5)] for y in range(30)])
colors.extend(['b'] * 30)
print (len(data))
print (data[0])
data = [[1,3,4],[2,3,4],[0,3,5]]
parallel_coordinates(data, style=colors).show()
| 30.103448
| 68
| 0.559374
|
bd481a202c10749e8890f8eece5014564f4d52e4
| 1,131
|
py
|
Python
|
localizacao/viewsets.py
|
WesGtoX/agro-digital
|
c4a453ae24e774cac48b032c921916820ff7b38f
|
[
"MIT"
] | null | null | null |
localizacao/viewsets.py
|
WesGtoX/agro-digital
|
c4a453ae24e774cac48b032c921916820ff7b38f
|
[
"MIT"
] | null | null | null |
localizacao/viewsets.py
|
WesGtoX/agro-digital
|
c4a453ae24e774cac48b032c921916820ff7b38f
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from slugify import slugify
from .models import Regiao, Cidade
from .serializers import RegiaoSerializer, CidadeSerializer
class RegiaoViewSet(viewsets.ModelViewSet):
queryset = Regiao.objects.all()
serializer_class = RegiaoSerializer
def perform_create(self, serializer):
nome = serializer.validated_data.get('nome', '')
estado = serializer.validated_data.get('estado', '')
serializer.save(slug=slugify(f'{nome} {estado}'))
def perform_update(self, serializer):
nome = serializer.validated_data.get('nome', '')
estado = serializer.validated_data.get('estado', '')
serializer.save(slug=slugify(f'{nome} {estado}'))
class CidadeViewSet(viewsets.ModelViewSet):
queryset = Cidade.objects.all()
serializer_class = CidadeSerializer
def perform_create(self, serializer):
nome = serializer.validated_data.get('nome', '')
serializer.save(slug=slugify(nome))
def perform_update(self, serializer):
nome = serializer.validated_data.get('nome', '')
serializer.save(slug=slugify(nome))
| 32.314286
| 60
| 0.704686
|
568b531570d13c5ff92688a457d05097c6f68805
| 7,533
|
py
|
Python
|
mycroft/util/file_utils.py
|
OpenVoiceOS/ovos-core
|
662577740a6e8cb5bb92bec014a05be9a6870b05
|
[
"Apache-2.0"
] | 4
|
2021-11-03T22:06:30.000Z
|
2021-11-19T08:03:58.000Z
|
mycroft/util/file_utils.py
|
OpenVoiceOS/ovos-core
|
662577740a6e8cb5bb92bec014a05be9a6870b05
|
[
"Apache-2.0"
] | 62
|
2021-10-29T21:18:51.000Z
|
2022-03-30T07:08:29.000Z
|
mycroft/util/file_utils.py
|
OpenVoiceOS/ovos-core
|
662577740a6e8cb5bb92bec014a05be9a6870b05
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mycroft file utils.
This module contains functions handling mycroft resource files and things like
accessing and curating mycroft's cache.
"""
import os
import time
from os.path import dirname
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from ovos_utils.file_utils import get_temp_path
from ovos_utils.configuration import get_xdg_base, get_xdg_data_dirs, \
get_xdg_data_save_path, get_xdg_cache_save_path
import mycroft.configuration
from mycroft.util.log import LOG
# do not delete these imports, here for backwards compat!
from ovos_plugin_manager.utils.tts_cache import curate_cache, mb_to_bytes
def resolve_resource_file(res_name):
"""Convert a resource into an absolute filename.
Resource names are in the form: 'filename.ext'
or 'path/filename.ext'
    The system will look for $XDG_DATA_DIRS/mycroft/res_name first
(defaults to ~/.local/share/mycroft/res_name), and if not found will
look at /opt/mycroft/res_name, then finally it will look for res_name
in the 'mycroft/res' folder of the source code package.
Example:
With mycroft running as the user 'bob', if you called
``resolve_resource_file('snd/beep.wav')``
it would return either:
'$XDG_DATA_DIRS/mycroft/beep.wav',
'/home/bob/.mycroft/snd/beep.wav' or
'/opt/mycroft/snd/beep.wav' or
'.../mycroft/res/snd/beep.wav'
where the '...' is replaced by the path
where the package has been installed.
Args:
res_name (str): a resource path/name
Returns:
(str) path to resource or None if no resource found
"""
config = mycroft.configuration.Configuration()
# First look for fully qualified file (e.g. a user setting)
if os.path.isfile(res_name):
return res_name
# Now look for XDG_DATA_DIRS
for path in get_xdg_data_dirs():
filename = os.path.join(path, res_name)
if os.path.isfile(filename):
return filename
# Now look in the old user location
filename = os.path.join(os.path.expanduser('~'),
f'.{get_xdg_base()}',
res_name)
if os.path.isfile(filename):
return filename
# Next look for /opt/mycroft/res/res_name
data_dir = config.get('data_dir', get_xdg_data_save_path())
res_dir = os.path.join(data_dir, 'res')
filename = os.path.expanduser(os.path.join(res_dir, res_name))
if os.path.isfile(filename):
return filename
# Finally look for it in the source package
filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)
filename = os.path.abspath(os.path.normpath(filename))
if os.path.isfile(filename):
return filename
return None # Resource cannot be resolved
def read_stripped_lines(filename):
"""Read a file and return a list of stripped lines.
Args:
filename (str): path to file to read.
Returns:
(list) list of lines stripped from leading and ending white chars.
"""
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if line:
yield line
def read_dict(filename, div='='):
"""Read file into dict.
A file containing:
foo = bar
baz = bog
results in a dict
{
'foo': 'bar',
'baz': 'bog'
}
Args:
filename (str): path to file
        div (str): divider between dict keys and values
Returns:
(dict) generated dictionary
"""
d = {}
with open(filename, 'r') as f:
for line in f:
key, val = line.split(div)
d[key.strip()] = val.strip()
return d
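# Minimal illustration of read_dict (a sketch, not part of mycroft; it writes a
# throwaway file to the system temp directory and parses it back):
def _read_dict_example():
    import tempfile
    example_path = os.path.join(tempfile.gettempdir(), 'read_dict_example.txt')
    with open(example_path, 'w') as f:
        f.write('foo = bar\nbaz = bog\n')
    return read_dict(example_path)   # -> {'foo': 'bar', 'baz': 'bog'}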
def get_cache_directory(domain=None):
"""Get a directory for caching data.
This directory can be used to hold temporary caches of data to
speed up performance. This directory will likely be part of a
small RAM disk and may be cleared at any time. So code that
uses these cached files must be able to fallback and regenerate
the file.
Args:
domain (str): The cache domain. Basically just a subdirectory.
Returns:
(str) a path to the directory where you can cache data
"""
config = mycroft.configuration.Configuration()
directory = config.get("cache_path") or get_xdg_cache_save_path()
return ensure_directory_exists(directory, domain)
def ensure_directory_exists(directory, domain=None, permissions=0o777):
"""Create a directory and give access rights to all
Args:
directory (str): Root directory
domain (str): Domain. Basically a subdirectory to prevent things like
overlapping signal filenames.
        permissions (int): Directory permissions (default is 0o777)
Returns:
(str) a path to the directory
"""
if domain:
directory = os.path.join(directory, domain)
# Expand and normalize the path
directory = os.path.normpath(directory)
directory = os.path.expanduser(directory)
if not os.path.isdir(directory):
try:
save = os.umask(0)
os.makedirs(directory, permissions)
except OSError:
LOG.warning("Failed to create: " + directory)
finally:
os.umask(save)
return directory
def create_file(filename):
"""Create the file filename and create any directories needed
Args:
filename: Path to the file to be created
"""
ensure_directory_exists(os.path.dirname(filename), permissions=0o775)
with open(filename, 'w') as f:
f.write('')
os.chmod(filename, 0o777)
class FileWatcher:
def __init__(self, files, callback, recursive=False, ignore_creation=False):
self.observer = Observer()
self.handlers = []
for file_path in files:
watch_dir = dirname(file_path)
self.observer.schedule(FileEventHandler(file_path, callback, ignore_creation),
watch_dir, recursive=recursive)
self.observer.start()
def shutdown(self):
self.observer.unschedule_all()
self.observer.stop()
class FileEventHandler(FileSystemEventHandler):
def __init__(self, file_path, callback, ignore_creation=False):
super().__init__()
self._callback = callback
self._file_path = file_path
self._debounce = 1
self._last_update = 0
if ignore_creation:
            self._events = ('modified',)
else:
self._events = ('created', 'modified')
def on_any_event(self, event):
if event.is_directory:
return
elif event.event_type in self._events:
if event.src_path == self._file_path:
if time.time() - self._last_update >= self._debounce:
self._callback(event.src_path)
self._last_update = time.time()
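# Illustrative use of FileWatcher (a hedged sketch; '/tmp/example.conf' and the
# callback are hypothetical examples, not part of mycroft, and assume a POSIX /tmp):
if __name__ == '__main__':
    def _on_change(changed_path):
        LOG.info('file changed: ' + changed_path)
    _watcher = FileWatcher(['/tmp/example.conf'], _on_change)
    try:
        time.sleep(5)   # watch briefly, then always release the observer thread
    finally:
        _watcher.shutdown()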
| 31.128099
| 90
| 0.653259
|
a90ec2349a42388bec95da6af495c9a0c076b58e
| 11,251
|
py
|
Python
|
mayan/apps/linking/tests/test_api.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/linking/tests/test_api.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 15
|
2017-12-18T14:58:07.000Z
|
2021-03-01T20:05:05.000Z
|
mayan/apps/linking/tests/test_api.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.test import override_settings
from django.urls import reverse
from documents.models import DocumentType
from documents.tests.literals import (
TEST_DOCUMENT_TYPE_LABEL, TEST_SMALL_DOCUMENT_PATH
)
from rest_api.tests import BaseAPITestCase
from user_management.tests.literals import (
TEST_ADMIN_EMAIL, TEST_ADMIN_PASSWORD, TEST_ADMIN_USERNAME
)
from ..models import SmartLink, SmartLinkCondition
from .literals import (
TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
TEST_SMART_LINK_CONDITION_EXPRESSION,
TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED,
TEST_SMART_LINK_CONDITION_OPERATOR, TEST_SMART_LINK_DYNAMIC_LABEL,
TEST_SMART_LINK_LABEL_EDITED, TEST_SMART_LINK_LABEL
)
@override_settings(OCR_AUTO_OCR=False)
class SmartLinkAPITestCase(BaseAPITestCase):
def setUp(self):
super(SmartLinkAPITestCase, self).setUp()
self.admin_user = get_user_model().objects.create_superuser(
username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
password=TEST_ADMIN_PASSWORD
)
self.client.login(
username=TEST_ADMIN_USERNAME, password=TEST_ADMIN_PASSWORD
)
def tearDown(self):
if hasattr(self, 'document_type'):
self.document_type.delete()
super(SmartLinkAPITestCase, self).tearDown()
def _create_document_type(self):
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE_LABEL
)
def _create_document(self):
with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def _create_smart_link(self):
return SmartLink.objects.create(
label=TEST_SMART_LINK_LABEL,
dynamic_label=TEST_SMART_LINK_DYNAMIC_LABEL
)
def test_smart_link_create_view(self):
response = self.client.post(
reverse('rest_api:smartlink-list'), {
'label': TEST_SMART_LINK_LABEL
}
)
smart_link = SmartLink.objects.first()
self.assertEqual(response.data['id'], smart_link.pk)
self.assertEqual(response.data['label'], TEST_SMART_LINK_LABEL)
self.assertEqual(SmartLink.objects.count(), 1)
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL)
def test_smart_link_create_with_document_types_view(self):
self._create_document_type()
response = self.client.post(
reverse('rest_api:smartlink-list'), data={
'label': TEST_SMART_LINK_LABEL,
'document_types_pk_list': self.document_type.pk
},
)
smart_link = SmartLink.objects.first()
self.assertEqual(response.data['id'], smart_link.pk)
self.assertEqual(response.data['label'], TEST_SMART_LINK_LABEL)
self.assertEqual(SmartLink.objects.count(), 1)
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL)
self.assertQuerysetEqual(
smart_link.document_types.all(), (repr(self.document_type),)
)
def test_smart_link_delete_view(self):
smart_link = self._create_smart_link()
self.client.delete(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,))
)
self.assertEqual(SmartLink.objects.count(), 0)
def test_smart_link_detail_view(self):
smart_link = self._create_smart_link()
response = self.client.get(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,))
)
self.assertEqual(
response.data['label'], TEST_SMART_LINK_LABEL
)
def test_smart_link_patch_view(self):
self._create_document_type()
smart_link = self._create_smart_link()
self.client.patch(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,)),
data={
'label': TEST_SMART_LINK_LABEL_EDITED,
'document_types_pk_list': self.document_type.pk
}
)
smart_link.refresh_from_db()
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL_EDITED)
self.assertQuerysetEqual(
smart_link.document_types.all(), (repr(self.document_type),)
)
def test_smart_link_put_view(self):
smart_link = self._create_smart_link()
self.client.put(
reverse('rest_api:smartlink-detail', args=(smart_link.pk,)),
data={
'label': TEST_SMART_LINK_LABEL_EDITED,
}
)
smart_link.refresh_from_db()
self.assertEqual(smart_link.label, TEST_SMART_LINK_LABEL_EDITED)
@override_settings(OCR_AUTO_OCR=False)
class SmartLinkConditionAPITestCase(BaseAPITestCase):
def setUp(self):
super(SmartLinkConditionAPITestCase, self).setUp()
self.admin_user = get_user_model().objects.create_superuser(
username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
password=TEST_ADMIN_PASSWORD
)
self.client.login(
username=TEST_ADMIN_USERNAME, password=TEST_ADMIN_PASSWORD
)
def tearDown(self):
if hasattr(self, 'document_type'):
self.document_type.delete()
super(SmartLinkConditionAPITestCase, self).tearDown()
def _create_document_type(self):
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE_LABEL
)
def _create_document(self):
with open(TEST_SMALL_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def _create_smart_link(self):
self.smart_link = SmartLink.objects.create(
label=TEST_SMART_LINK_LABEL,
dynamic_label=TEST_SMART_LINK_DYNAMIC_LABEL
)
self.smart_link.document_types.add(self.document_type)
def _create_smart_link_condition(self):
self.smart_link_condition = SmartLinkCondition.objects.create(
smart_link=self.smart_link,
foreign_document_data=TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
expression=TEST_SMART_LINK_CONDITION_EXPRESSION,
operator=TEST_SMART_LINK_CONDITION_OPERATOR
)
def test_resolved_smart_link_detail_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
self._create_document()
response = self.client.get(
reverse(
'rest_api:resolvedsmartlink-detail',
args=(self.document.pk, self.smart_link.pk)
)
)
self.assertEqual(
response.data['label'], TEST_SMART_LINK_LABEL
)
def test_resolved_smart_link_list_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
self._create_document()
response = self.client.get(
reverse(
'rest_api:resolvedsmartlink-list', args=(self.document.pk,)
)
)
self.assertEqual(
response.data['results'][0]['label'], TEST_SMART_LINK_LABEL
)
def test_resolved_smart_link_document_list_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
self._create_document()
response = self.client.get(
reverse(
'rest_api:resolvedsmartlinkdocument-list',
args=(self.document.pk, self.smart_link.pk)
)
)
self.assertEqual(
response.data['results'][0]['label'], self.document.label
)
def test_smart_link_condition_create_view(self):
self._create_document_type()
self._create_smart_link()
response = self.client.post(
reverse(
'rest_api:smartlinkcondition-list', args=(self.smart_link.pk,)
), {
'foreign_document_data': TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
'expression': TEST_SMART_LINK_CONDITION_EXPRESSION,
'operator': TEST_SMART_LINK_CONDITION_OPERATOR
}
)
smart_link_condition = SmartLinkCondition.objects.first()
self.assertEqual(response.data['id'], smart_link_condition.pk)
self.assertEqual(
response.data['operator'], TEST_SMART_LINK_CONDITION_OPERATOR
)
self.assertEqual(SmartLinkCondition.objects.count(), 1)
self.assertEqual(
smart_link_condition.operator, TEST_SMART_LINK_CONDITION_OPERATOR
)
def test_smart_link_condition_delete_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
self.client.delete(
reverse(
'rest_api:smartlinkcondition-detail',
args=(self.smart_link.pk, self.smart_link_condition.pk)
)
)
self.assertEqual(SmartLinkCondition.objects.count(), 0)
def test_smart_link_condition_detail_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
response = self.client.get(
reverse(
'rest_api:smartlinkcondition-detail',
args=(self.smart_link.pk, self.smart_link_condition.pk)
)
)
self.assertEqual(
response.data['operator'], TEST_SMART_LINK_CONDITION_OPERATOR
)
def test_smart_link_condition_patch_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
self.client.patch(
reverse(
'rest_api:smartlinkcondition-detail',
args=(self.smart_link.pk, self.smart_link_condition.pk)
),
data={
'expression': TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED,
}
)
self.smart_link_condition.refresh_from_db()
self.assertEqual(
self.smart_link_condition.expression,
TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED
)
def test_smart_link_condition_put_view(self):
self._create_document_type()
self._create_smart_link()
self._create_smart_link_condition()
self.client.put(
reverse(
'rest_api:smartlinkcondition-detail',
args=(self.smart_link.pk, self.smart_link_condition.pk)
),
data={
'expression': TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED,
'foreign_document_data': TEST_SMART_LINK_CONDITION_FOREIGN_DOCUMENT_DATA,
'operator': TEST_SMART_LINK_CONDITION_OPERATOR,
}
)
self.smart_link_condition.refresh_from_db()
self.assertEqual(
self.smart_link_condition.expression,
TEST_SMART_LINK_CONDITION_EXPRESSION_EDITED
)
| 32.611594
| 89
| 0.652298
|
2a4f3d9a784546d79b9452955fdae4420925723c
| 47,860
|
py
|
Python
|
tests/gclient_scm_test.py
|
nbaiot/depot_tools
|
efce0d1b7657c440c90f0f4bce614b96672b9e0b
|
[
"BSD-3-Clause"
] | 1
|
2021-09-10T06:10:02.000Z
|
2021-09-10T06:10:02.000Z
|
tests/gclient_scm_test.py
|
nbaiot/depot_tools
|
efce0d1b7657c440c90f0f4bce614b96672b9e0b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/gclient_scm_test.py
|
nbaiot/depot_tools
|
efce0d1b7657c440c90f0f4bce614b96672b9e0b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env vpython3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient_scm.py."""
# pylint: disable=E1103
from shutil import rmtree
from subprocess import Popen, PIPE, STDOUT
import json
import logging
import os
import re
import sys
import tempfile
import unittest
if sys.version_info.major == 2:
from cStringIO import StringIO
else:
from io import StringIO
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from third_party import mock
from testing_support import fake_repos
from testing_support import test_case_utils
import gclient_scm
import git_cache
import subprocess2
# Disable global git cache
git_cache.Mirror.SetCachePath(None)
# Shortcut since this function is used often
join = gclient_scm.os.path.join
TIMESTAMP_RE = re.compile(r'\[[0-9]{1,2}:[0-9]{2}:[0-9]{2}\] (.*)', re.DOTALL)
def strip_timestamps(value):
lines = value.splitlines(True)
for i in range(len(lines)):
m = TIMESTAMP_RE.match(lines[i])
if m:
lines[i] = m.group(1)
return ''.join(lines)
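# For example, strip_timestamps('[12:34:56] Syncing projects\n') returns
# 'Syncing projects\n'; lines without a "[HH:MM:SS] " prefix pass through
# unchanged.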
class BasicTests(unittest.TestCase):
@mock.patch('gclient_scm.scm.GIT.Capture')
def testGetFirstRemoteUrl(self, mockCapture):
REMOTE_STRINGS = [('remote.origin.url E:\\foo\\bar', 'E:\\foo\\bar'),
('remote.origin.url /b/foo/bar', '/b/foo/bar'),
('remote.origin.url https://foo/bar', 'https://foo/bar'),
('remote.origin.url E:\\Fo Bar\\bax', 'E:\\Fo Bar\\bax'),
('remote.origin.url git://what/"do', 'git://what/"do')]
FAKE_PATH = '/fake/path'
mockCapture.side_effect = [question for question, _ in REMOTE_STRINGS]
for _, answer in REMOTE_STRINGS:
self.assertEqual(
gclient_scm.SCMWrapper._get_first_remote_url(FAKE_PATH), answer)
expected_calls = [
mock.call(['config', '--local', '--get-regexp', r'remote.*.url'],
cwd=FAKE_PATH)
for _ in REMOTE_STRINGS
]
self.assertEqual(mockCapture.mock_calls, expected_calls)
class BaseGitWrapperTestCase(unittest.TestCase, test_case_utils.TestCaseUtils):
"""This class doesn't use pymox."""
class OptionsObject(object):
def __init__(self, verbose=False, revision=None):
self.auto_rebase = False
self.verbose = verbose
self.revision = revision
self.deps_os = None
self.force = False
self.reset = False
self.nohooks = False
self.no_history = False
self.upstream = False
self.cache_dir = None
self.merge = False
self.jobs = 1
self.break_repo_locks = False
self.delete_unversioned_trees = False
self.patch_ref = None
self.patch_repo = None
self.rebase_patch_ref = True
self.reset_patch_ref = True
sample_git_import = """blob
mark :1
data 6
Hello
blob
mark :2
data 4
Bye
reset refs/heads/master
commit refs/heads/master
mark :3
author Bob <bob@example.com> 1253744361 -0700
committer Bob <bob@example.com> 1253744361 -0700
data 8
A and B
M 100644 :1 a
M 100644 :2 b
blob
mark :4
data 10
Hello
You
blob
mark :5
data 8
Bye
You
commit refs/heads/origin
mark :6
author Alice <alice@example.com> 1253744424 -0700
committer Alice <alice@example.com> 1253744424 -0700
data 13
Personalized
from :3
M 100644 :4 a
M 100644 :5 b
blob
mark :7
data 5
Mooh
commit refs/heads/feature
mark :8
author Bob <bob@example.com> 1390311986 -0000
committer Bob <bob@example.com> 1390311986 -0000
data 6
Add C
from :3
M 100644 :7 c
reset refs/heads/master
from :3
"""
def Options(self, *args, **kwargs):
return self.OptionsObject(*args, **kwargs)
def checkstdout(self, expected):
value = sys.stdout.getvalue()
sys.stdout.close()
# pylint: disable=no-member
self.assertEqual(expected, strip_timestamps(value))
@staticmethod
def CreateGitRepo(git_import, path):
"""Do it for real."""
try:
Popen(['git', 'init', '-q'], stdout=PIPE, stderr=STDOUT,
cwd=path).communicate()
except OSError:
# git is not available, skip this test.
return False
Popen(['git', 'fast-import', '--quiet'], stdin=PIPE, stdout=PIPE,
stderr=STDOUT, cwd=path).communicate(input=git_import.encode())
Popen(['git', 'checkout', '-q'], stdout=PIPE, stderr=STDOUT,
cwd=path).communicate()
Popen(['git', 'remote', 'add', '-f', 'origin', '.'], stdout=PIPE,
stderr=STDOUT, cwd=path).communicate()
Popen(['git', 'checkout', '-b', 'new', 'origin/master', '-q'], stdout=PIPE,
stderr=STDOUT, cwd=path).communicate()
Popen(['git', 'push', 'origin', 'origin/origin:origin/master', '-q'],
stdout=PIPE, stderr=STDOUT, cwd=path).communicate()
Popen(['git', 'config', '--unset', 'remote.origin.fetch'], stdout=PIPE,
stderr=STDOUT, cwd=path).communicate()
Popen(['git', 'config', 'user.email', 'someuser@chromium.org'], stdout=PIPE,
stderr=STDOUT, cwd=path).communicate()
Popen(['git', 'config', 'user.name', 'Some User'], stdout=PIPE,
stderr=STDOUT, cwd=path).communicate()
return True
def _GetAskForDataCallback(self, expected_prompt, return_value):
def AskForData(prompt, options):
self.assertEqual(prompt, expected_prompt)
return return_value
return AskForData
def setUp(self):
unittest.TestCase.setUp(self)
test_case_utils.TestCaseUtils.setUp(self)
self.url = 'git://foo'
# The .git suffix allows gclient_scm to recognize the dir as a git repo
# when cloning it locally
self.root_dir = tempfile.mkdtemp('.git')
self.relpath = '.'
self.base_path = join(self.root_dir, self.relpath)
self.enabled = self.CreateGitRepo(self.sample_git_import, self.base_path)
self._original_GitBinaryExists = gclient_scm.GitWrapper.BinaryExists
mock.patch('gclient_scm.GitWrapper.BinaryExists',
staticmethod(lambda : True)).start()
mock.patch('sys.stdout', StringIO()).start()
self.addCleanup(mock.patch.stopall)
self.addCleanup(lambda: rmtree(self.root_dir))
class ManagedGitWrapperTestCase(BaseGitWrapperTestCase):
def testRevertMissing(self):
if not self.enabled:
return
options = self.Options()
file_path = join(self.base_path, 'a')
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, None, file_list)
gclient_scm.os.remove(file_path)
file_list = []
scm.revert(options, self.args, file_list)
self.assertEqual(file_list, [file_path])
file_list = []
scm.diff(options, self.args, file_list)
self.assertEqual(file_list, [])
sys.stdout.close()
def testRevertNone(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, None, file_list)
file_list = []
scm.revert(options, self.args, file_list)
self.assertEqual(file_list, [])
self.assertEqual(scm.revinfo(options, self.args, None),
'a7142dc9f0009350b96a11f372b6ea658592aa95')
sys.stdout.close()
def testRevertModified(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, None, file_list)
file_path = join(self.base_path, 'a')
with open(file_path, 'a') as f:
f.writelines('touched\n')
file_list = []
scm.revert(options, self.args, file_list)
self.assertEqual(file_list, [file_path])
file_list = []
scm.diff(options, self.args, file_list)
self.assertEqual(file_list, [])
self.assertEqual(scm.revinfo(options, self.args, None),
'a7142dc9f0009350b96a11f372b6ea658592aa95')
sys.stdout.close()
def testRevertNew(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, None, file_list)
file_path = join(self.base_path, 'c')
with open(file_path, 'w') as f:
f.writelines('new\n')
Popen(['git', 'add', 'c'], stdout=PIPE,
stderr=STDOUT, cwd=self.base_path).communicate()
file_list = []
scm.revert(options, self.args, file_list)
self.assertEqual(file_list, [file_path])
file_list = []
scm.diff(options, self.args, file_list)
self.assertEqual(file_list, [])
self.assertEqual(scm.revinfo(options, self.args, None),
'a7142dc9f0009350b96a11f372b6ea658592aa95')
sys.stdout.close()
def testStatusNew(self):
if not self.enabled:
return
options = self.Options()
file_path = join(self.base_path, 'a')
with open(file_path, 'a') as f:
f.writelines('touched\n')
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.status(options, self.args, file_list)
self.assertEqual(file_list, [file_path])
self.checkstdout(
('\n________ running \'git -c core.quotePath=false diff --name-status '
'069c602044c5388d2d15c3f875b057c852003458\' in \'%s\'\n\nM\ta\n') %
join(self.root_dir, '.'))
def testStatus2New(self):
if not self.enabled:
return
options = self.Options()
expected_file_list = []
for f in ['a', 'b']:
file_path = join(self.base_path, f)
with open(file_path, 'a') as f:
f.writelines('touched\n')
expected_file_list.extend([file_path])
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.status(options, self.args, file_list)
expected_file_list = [join(self.base_path, x) for x in ['a', 'b']]
self.assertEqual(sorted(file_list), expected_file_list)
self.checkstdout(
('\n________ running \'git -c core.quotePath=false diff --name-status '
'069c602044c5388d2d15c3f875b057c852003458\' in \'%s\'\n\nM\ta\nM\tb\n')
% join(self.root_dir, '.'))
def testUpdateUpdate(self):
if not self.enabled:
return
options = self.Options()
expected_file_list = [join(self.base_path, x) for x in ['a', 'b']]
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'a7142dc9f0009350b96a11f372b6ea658592aa95')
sys.stdout.close()
def testUpdateMerge(self):
if not self.enabled:
return
options = self.Options()
options.merge = True
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
scm._Run(['checkout', '-q', 'feature'], options)
rev = scm.revinfo(options, (), None)
file_list = []
scm.update(options, (), file_list)
self.assertEqual(file_list, [join(self.base_path, x)
for x in ['a', 'b', 'c']])
# The actual commit that is created is unstable, so we verify its tree and
# parents instead.
self.assertEqual(scm._Capture(['rev-parse', 'HEAD:']),
'd2e35c10ac24d6c621e14a1fcadceb533155627d')
self.assertEqual(scm._Capture(['rev-parse', 'HEAD^1']), rev)
self.assertEqual(scm._Capture(['rev-parse', 'HEAD^2']),
scm._Capture(['rev-parse', 'origin/master']))
sys.stdout.close()
def testUpdateRebase(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
scm._Run(['checkout', '-q', 'feature'], options)
file_list = []
# Fake a 'y' key press.
scm._AskForData = self._GetAskForDataCallback(
'Cannot fast-forward merge, attempt to rebase? '
'(y)es / (q)uit / (s)kip : ', 'y')
scm.update(options, (), file_list)
self.assertEqual(file_list, [join(self.base_path, x)
for x in ['a', 'b', 'c']])
# The actual commit that is created is unstable, so we verify its tree and
# parent instead.
self.assertEqual(scm._Capture(['rev-parse', 'HEAD:']),
'd2e35c10ac24d6c621e14a1fcadceb533155627d')
self.assertEqual(scm._Capture(['rev-parse', 'HEAD^']),
scm._Capture(['rev-parse', 'origin/master']))
sys.stdout.close()
def testUpdateReset(self):
if not self.enabled:
return
options = self.Options()
options.reset = True
dir_path = join(self.base_path, 'c')
os.mkdir(dir_path)
with open(join(dir_path, 'nested'), 'w') as f:
f.writelines('new\n')
file_path = join(self.base_path, 'file')
with open(file_path, 'w') as f:
f.writelines('new\n')
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, (), file_list)
self.assert_(gclient_scm.os.path.isdir(dir_path))
self.assert_(gclient_scm.os.path.isfile(file_path))
sys.stdout.close()
def testUpdateResetUnsetsFetchConfig(self):
if not self.enabled:
return
options = self.Options()
options.reset = True
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
scm._Run(['config', 'remote.origin.fetch',
'+refs/heads/bad/ref:refs/remotes/origin/bad/ref'], options)
file_list = []
scm.update(options, (), file_list)
self.assertEqual(scm.revinfo(options, (), None),
'069c602044c5388d2d15c3f875b057c852003458')
sys.stdout.close()
def testUpdateResetDeleteUnversionedTrees(self):
if not self.enabled:
return
options = self.Options()
options.reset = True
options.delete_unversioned_trees = True
dir_path = join(self.base_path, 'dir')
os.mkdir(dir_path)
with open(join(dir_path, 'nested'), 'w') as f:
f.writelines('new\n')
file_path = join(self.base_path, 'file')
with open(file_path, 'w') as f:
f.writelines('new\n')
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
scm.update(options, (), file_list)
self.assert_(not gclient_scm.os.path.isdir(dir_path))
self.assert_(gclient_scm.os.path.isfile(file_path))
sys.stdout.close()
def testUpdateUnstagedConflict(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_path = join(self.base_path, 'b')
with open(file_path, 'w') as f:
f.writelines('conflict\n')
try:
scm.update(options, (), [])
self.fail()
except (gclient_scm.gclient_utils.Error, subprocess2.CalledProcessError):
# The exact exception text varies across git versions so it's not worth
# verifying it. It's fine as long as it throws.
pass
    # Manually flush stdout since we can't verify its content accurately across
# git versions.
sys.stdout.getvalue()
sys.stdout.close()
@unittest.skip('Skipping until crbug.com/670884 is resolved.')
def testUpdateLocked(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_path = join(self.base_path, '.git', 'index.lock')
with open(file_path, 'w'):
pass
with self.assertRaises(subprocess2.CalledProcessError):
scm.update(options, (), [])
sys.stdout.close()
def testUpdateLockedBreak(self):
if not self.enabled:
return
options = self.Options()
options.break_repo_locks = True
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_path = join(self.base_path, '.git', 'index.lock')
with open(file_path, 'w'):
pass
scm.update(options, (), [])
    self.assertRegexpMatches(sys.stdout.getvalue(),
                             r"breaking lock.*\.git/index\.lock")
self.assertFalse(os.path.exists(file_path))
sys.stdout.close()
def testUpdateConflict(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_path = join(self.base_path, 'b')
with open(file_path, 'w') as f:
f.writelines('conflict\n')
scm._Run(['commit', '-am', 'test'], options)
scm._AskForData = self._GetAskForDataCallback(
'Cannot fast-forward merge, attempt to rebase? '
'(y)es / (q)uit / (s)kip : ', 'y')
with self.assertRaises(gclient_scm.gclient_utils.Error) as e:
scm.update(options, (), [])
self.assertEqual(
e.exception.args[0],
'Conflict while rebasing this branch.\n'
'Fix the conflict and run gclient again.\n'
'See \'man git-rebase\' for details.\n')
with self.assertRaises(gclient_scm.gclient_utils.Error) as e:
scm.update(options, (), [])
self.assertEqual(
e.exception.args[0],
'\n____ . at refs/remotes/origin/master\n'
'\tYou have unstaged changes.\n'
'\tPlease commit, stash, or reset.\n')
sys.stdout.close()
def testRevinfo(self):
if not self.enabled:
return
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
rev_info = scm.revinfo(options, (), None)
self.assertEqual(rev_info, '069c602044c5388d2d15c3f875b057c852003458')
def testMirrorPushUrl(self):
if not self.enabled:
return
fakes = fake_repos.FakeRepos()
fakes.set_up_git()
self.url = fakes.git_base + 'repo_1'
self.root_dir = fakes.root_dir
self.addCleanup(fake_repos.FakeRepos.tear_down_git, fakes)
mirror = tempfile.mkdtemp()
self.addCleanup(rmtree, mirror)
# This should never happen, but if it does, it'd render the other assertions
# in this test meaningless.
self.assertFalse(self.url.startswith(mirror))
git_cache.Mirror.SetCachePath(mirror)
self.addCleanup(git_cache.Mirror.SetCachePath, None)
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir, self.relpath)
self.assertIsNotNone(scm._GetMirror(self.url, options))
scm.update(options, (), [])
fetch_url = scm._Capture(['remote', 'get-url', 'origin'])
self.assertTrue(
fetch_url.startswith(mirror),
msg='\n'.join([
'Repository fetch url should be in the git cache mirror directory.',
' fetch_url: %s' % fetch_url,
' mirror: %s' % mirror]))
push_url = scm._Capture(['remote', 'get-url', '--push', 'origin'])
self.assertEqual(push_url, self.url)
sys.stdout.close()
class ManagedGitWrapperTestCaseMock(unittest.TestCase):
class OptionsObject(object):
def __init__(self, verbose=False, revision=None, force=False):
self.verbose = verbose
self.revision = revision
self.deps_os = None
self.force = force
self.reset = False
self.nohooks = False
self.break_repo_locks = False
# TODO(maruel): Test --jobs > 1.
self.jobs = 1
self.patch_ref = None
self.patch_repo = None
self.rebase_patch_ref = True
def Options(self, *args, **kwargs):
return self.OptionsObject(*args, **kwargs)
def checkstdout(self, expected):
value = sys.stdout.getvalue()
sys.stdout.close()
# pylint: disable=no-member
self.assertEqual(expected, strip_timestamps(value))
def setUp(self):
self.fake_hash_1 = 't0ta11yf4k3'
self.fake_hash_2 = '3v3nf4k3r'
self.url = 'git://foo'
self.root_dir = '/tmp' if sys.platform != 'win32' else 't:\\tmp'
self.relpath = 'fake'
self.base_path = os.path.join(self.root_dir, self.relpath)
self.backup_base_path = os.path.join(self.root_dir,
'old_%s.git' % self.relpath)
mock.patch('gclient_scm.scm.GIT.ApplyEnvVars').start()
mock.patch('gclient_scm.GitWrapper._CheckMinVersion').start()
mock.patch('gclient_scm.GitWrapper._Fetch').start()
mock.patch('gclient_scm.GitWrapper._DeleteOrMove').start()
mock.patch('sys.stdout', StringIO()).start()
self.addCleanup(mock.patch.stopall)
@mock.patch('scm.GIT.IsValidRevision')
@mock.patch('os.path.isdir', lambda _: True)
def testGetUsableRevGit(self, mockIsValidRevision):
# pylint: disable=no-member
options = self.Options(verbose=True)
mockIsValidRevision.side_effect = lambda cwd, rev: rev != '1'
git_scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
    # A [fake] git sha1 with a git repo should work (this covers the case where
    # the LKGR is flipped to git sha1s some day).
self.assertEqual(git_scm.GetUsableRev(self.fake_hash_1, options),
self.fake_hash_1)
# An SVN rev with an existing purely git repo should raise an exception.
self.assertRaises(gclient_scm.gclient_utils.Error,
git_scm.GetUsableRev, '1', options)
@mock.patch('gclient_scm.GitWrapper._Clone')
@mock.patch('os.path.isdir')
@mock.patch('os.path.exists')
@mock.patch('subprocess2.check_output')
def testUpdateNoDotGit(
self, mockCheckOutput, mockExists, mockIsdir, mockClone):
mockIsdir.side_effect = lambda path: path == self.base_path
mockExists.side_effect = lambda path: path == self.base_path
mockCheckOutput.return_value = b''
options = self.Options()
scm = gclient_scm.GitWrapper(
self.url, self.root_dir, self.relpath)
scm.update(options, None, [])
env = gclient_scm.scm.GIT.ApplyEnvVars({})
self.assertEqual(
mockCheckOutput.mock_calls,
[
mock.call(
['git', '-c', 'core.quotePath=false', 'ls-files'],
cwd=self.base_path, env=env, stderr=-1),
mock.call(
['git', 'rev-parse', '--verify', 'HEAD'],
cwd=self.base_path, env=env, stderr=-1),
])
mockClone.assert_called_with(
'refs/remotes/origin/master', self.url, options)
self.checkstdout('\n')
@mock.patch('gclient_scm.GitWrapper._Clone')
@mock.patch('os.path.isdir')
@mock.patch('os.path.exists')
@mock.patch('subprocess2.check_output')
def testUpdateConflict(
self, mockCheckOutput, mockExists, mockIsdir, mockClone):
mockIsdir.side_effect = lambda path: path == self.base_path
mockExists.side_effect = lambda path: path == self.base_path
mockCheckOutput.return_value = b''
mockClone.side_effect = [
gclient_scm.subprocess2.CalledProcessError(
None, None, None, None, None),
None,
]
options = self.Options()
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
scm.update(options, None, [])
env = gclient_scm.scm.GIT.ApplyEnvVars({})
self.assertEqual(
mockCheckOutput.mock_calls,
[
mock.call(
['git', '-c', 'core.quotePath=false', 'ls-files'],
cwd=self.base_path, env=env, stderr=-1),
mock.call(
['git', 'rev-parse', '--verify', 'HEAD'],
cwd=self.base_path, env=env, stderr=-1),
])
mockClone.assert_called_with(
'refs/remotes/origin/master', self.url, options)
self.checkstdout('\n')
class UnmanagedGitWrapperTestCase(BaseGitWrapperTestCase):
def checkInStdout(self, expected):
value = sys.stdout.getvalue()
sys.stdout.close()
# pylint: disable=no-member
self.assertIn(expected, value)
def checkNotInStdout(self, expected):
value = sys.stdout.getvalue()
sys.stdout.close()
# pylint: disable=no-member
self.assertNotIn(expected, value)
def getCurrentBranch(self):
    # Returns the name of the current branch, or None for a detached HEAD
branch = gclient_scm.scm.GIT.Capture(['rev-parse', '--abbrev-ref', 'HEAD'],
cwd=self.base_path)
if branch == 'HEAD':
return None
return branch
def testUpdateClone(self):
if not self.enabled:
return
options = self.Options()
origin_root_dir = self.root_dir
self.root_dir = tempfile.mkdtemp()
self.relpath = '.'
self.base_path = join(self.root_dir, self.relpath)
scm = gclient_scm.GitWrapper(origin_root_dir,
self.root_dir,
self.relpath)
expected_file_list = [join(self.base_path, "a"),
join(self.base_path, "b")]
file_list = []
options.revision = 'unmanaged'
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'069c602044c5388d2d15c3f875b057c852003458')
# indicates detached HEAD
self.assertEqual(self.getCurrentBranch(), None)
self.checkInStdout(
'Checked out refs/remotes/origin/master to a detached HEAD')
rmtree(origin_root_dir)
def testUpdateCloneOnCommit(self):
if not self.enabled:
return
options = self.Options()
origin_root_dir = self.root_dir
self.root_dir = tempfile.mkdtemp()
self.relpath = '.'
self.base_path = join(self.root_dir, self.relpath)
url_with_commit_ref = origin_root_dir +\
'@a7142dc9f0009350b96a11f372b6ea658592aa95'
scm = gclient_scm.GitWrapper(url_with_commit_ref,
self.root_dir,
self.relpath)
expected_file_list = [join(self.base_path, "a"),
join(self.base_path, "b")]
file_list = []
options.revision = 'unmanaged'
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'a7142dc9f0009350b96a11f372b6ea658592aa95')
# indicates detached HEAD
self.assertEqual(self.getCurrentBranch(), None)
self.checkInStdout(
'Checked out a7142dc9f0009350b96a11f372b6ea658592aa95 to a detached HEAD')
rmtree(origin_root_dir)
def testUpdateCloneOnBranch(self):
if not self.enabled:
return
options = self.Options()
origin_root_dir = self.root_dir
self.root_dir = tempfile.mkdtemp()
self.relpath = '.'
self.base_path = join(self.root_dir, self.relpath)
url_with_branch_ref = origin_root_dir + '@feature'
scm = gclient_scm.GitWrapper(url_with_branch_ref,
self.root_dir,
self.relpath)
expected_file_list = [join(self.base_path, "a"),
join(self.base_path, "b"),
join(self.base_path, "c")]
file_list = []
options.revision = 'unmanaged'
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'9a51244740b25fa2ded5252ca00a3178d3f665a9')
# indicates detached HEAD
self.assertEqual(self.getCurrentBranch(), None)
self.checkInStdout(
'Checked out 9a51244740b25fa2ded5252ca00a3178d3f665a9 '
'to a detached HEAD')
rmtree(origin_root_dir)
def testUpdateCloneOnFetchedRemoteBranch(self):
if not self.enabled:
return
options = self.Options()
origin_root_dir = self.root_dir
self.root_dir = tempfile.mkdtemp()
self.relpath = '.'
self.base_path = join(self.root_dir, self.relpath)
url_with_branch_ref = origin_root_dir + '@refs/remotes/origin/feature'
scm = gclient_scm.GitWrapper(url_with_branch_ref,
self.root_dir,
self.relpath)
expected_file_list = [join(self.base_path, "a"),
join(self.base_path, "b"),
join(self.base_path, "c")]
file_list = []
options.revision = 'unmanaged'
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'9a51244740b25fa2ded5252ca00a3178d3f665a9')
# indicates detached HEAD
self.assertEqual(self.getCurrentBranch(), None)
self.checkInStdout(
'Checked out refs/remotes/origin/feature to a detached HEAD')
rmtree(origin_root_dir)
def testUpdateCloneOnTrueRemoteBranch(self):
if not self.enabled:
return
options = self.Options()
origin_root_dir = self.root_dir
self.root_dir = tempfile.mkdtemp()
self.relpath = '.'
self.base_path = join(self.root_dir, self.relpath)
url_with_branch_ref = origin_root_dir + '@refs/heads/feature'
scm = gclient_scm.GitWrapper(url_with_branch_ref,
self.root_dir,
self.relpath)
expected_file_list = [join(self.base_path, "a"),
join(self.base_path, "b"),
join(self.base_path, "c")]
file_list = []
options.revision = 'unmanaged'
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'9a51244740b25fa2ded5252ca00a3178d3f665a9')
# @refs/heads/feature is AKA @refs/remotes/origin/feature in the clone, so
# should be treated as such by gclient.
# TODO(mmoss): Though really, we should only allow DEPS to specify branches
# as they are known in the upstream repo, since the mapping into the local
# repo can be modified by users (or we might even want to change the gclient
# defaults at some point). But that will take more work to stop using
# refs/remotes/ everywhere that we do (and to stop assuming a DEPS ref will
# always resolve locally, like when passing them to show-ref or rev-list).
self.assertEqual(self.getCurrentBranch(), None)
self.checkInStdout(
'Checked out refs/remotes/origin/feature to a detached HEAD')
rmtree(origin_root_dir)
def testUpdateUpdate(self):
if not self.enabled:
return
options = self.Options()
expected_file_list = []
scm = gclient_scm.GitWrapper(self.url, self.root_dir,
self.relpath)
file_list = []
options.revision = 'unmanaged'
scm.update(options, (), file_list)
self.assertEqual(file_list, expected_file_list)
self.assertEqual(scm.revinfo(options, (), None),
'069c602044c5388d2d15c3f875b057c852003458')
self.checkstdout('________ unmanaged solution; skipping .\n')
class CipdWrapperTestCase(unittest.TestCase):
def setUp(self):
# Create this before setting up mocks.
self._cipd_root_dir = tempfile.mkdtemp()
self._workdir = tempfile.mkdtemp()
self._cipd_instance_url = 'https://chrome-infra-packages.appspot.com'
self._cipd_root = gclient_scm.CipdRoot(
self._cipd_root_dir,
self._cipd_instance_url)
self._cipd_packages = [
self._cipd_root.add_package('f', 'foo_package', 'foo_version'),
self._cipd_root.add_package('b', 'bar_package', 'bar_version'),
self._cipd_root.add_package('b', 'baz_package', 'baz_version'),
]
mock.patch('tempfile.mkdtemp', lambda: self._workdir).start()
mock.patch('gclient_scm.CipdRoot.add_package').start()
mock.patch('gclient_scm.CipdRoot.clobber').start()
mock.patch('gclient_scm.CipdRoot.ensure').start()
self.addCleanup(mock.patch.stopall)
def tearDown(self):
rmtree(self._cipd_root_dir)
rmtree(self._workdir)
def createScmWithPackageThatSatisfies(self, condition):
return gclient_scm.CipdWrapper(
url=self._cipd_instance_url,
root_dir=self._cipd_root_dir,
relpath='fake_relpath',
root=self._cipd_root,
package=self.getPackageThatSatisfies(condition))
def getPackageThatSatisfies(self, condition):
for p in self._cipd_packages:
if condition(p):
return p
self.fail('Unable to find a satisfactory package.')
def testRevert(self):
"""Checks that revert does nothing."""
scm = self.createScmWithPackageThatSatisfies(lambda _: True)
scm.revert(None, (), [])
@mock.patch('gclient_scm.gclient_utils.CheckCallAndFilter')
@mock.patch('gclient_scm.gclient_utils.rmtree')
def testRevinfo(self, mockRmtree, mockCheckCallAndFilter):
"""Checks that revinfo uses the JSON from cipd describe."""
scm = self.createScmWithPackageThatSatisfies(lambda _: True)
expected_revinfo = '0123456789abcdef0123456789abcdef01234567'
json_contents = {
'result': {
'pin': {
'instance_id': expected_revinfo,
}
}
}
describe_json_path = join(self._workdir, 'describe.json')
with open(describe_json_path, 'w') as describe_json:
json.dump(json_contents, describe_json)
revinfo = scm.revinfo(None, (), [])
self.assertEqual(revinfo, expected_revinfo)
mockRmtree.assert_called_with(self._workdir)
mockCheckCallAndFilter.assert_called_with([
'cipd', 'describe', 'foo_package',
'-log-level', 'error',
'-version', 'foo_version',
'-json-output', describe_json_path,
])
def testUpdate(self):
"""Checks that update does nothing."""
scm = self.createScmWithPackageThatSatisfies(lambda _: True)
scm.update(None, (), [])
class GerritChangesFakeRepo(fake_repos.FakeReposBase):
def populateGit(self):
# Creates a tree that looks like this:
#
# 6 refs/changes/35/1235/1
# |
# 5 refs/changes/34/1234/1
# |
# 1--2--3--4 refs/heads/master
# | |
# | 11(5)--12 refs/heads/master-with-5
# |
# 7--8--9 refs/heads/feature
# |
# 10 refs/changes/36/1236/1
#
self._commit_git('repo_1', {'commit 1': 'touched'})
self._commit_git('repo_1', {'commit 2': 'touched'})
self._commit_git('repo_1', {'commit 3': 'touched'})
self._commit_git('repo_1', {'commit 4': 'touched'})
self._create_ref('repo_1', 'refs/heads/master', 4)
# Create a change on top of commit 3 that consists of two commits.
self._commit_git('repo_1',
{'commit 5': 'touched',
'change': '1234'},
base=3)
self._create_ref('repo_1', 'refs/changes/34/1234/1', 5)
self._commit_git('repo_1',
{'commit 6': 'touched',
'change': '1235'})
self._create_ref('repo_1', 'refs/changes/35/1235/1', 6)
# Create a refs/heads/feature branch on top of commit 2, consisting of three
# commits.
self._commit_git('repo_1', {'commit 7': 'touched'}, base=2)
self._commit_git('repo_1', {'commit 8': 'touched'})
self._commit_git('repo_1', {'commit 9': 'touched'})
self._create_ref('repo_1', 'refs/heads/feature', 9)
# Create a change of top of commit 8.
self._commit_git('repo_1',
{'commit 10': 'touched',
'change': '1236'},
base=8)
self._create_ref('repo_1', 'refs/changes/36/1236/1', 10)
# Create a refs/heads/master-with-5 on top of commit 3 which is a branch
# where refs/changes/34/1234/1 (commit 5) has already landed as commit 11.
self._commit_git('repo_1',
# This is really commit 11, but has the changes of commit 5
{'commit 5': 'touched',
'change': '1234'},
base=3)
self._commit_git('repo_1', {'commit 12': 'touched'})
self._create_ref('repo_1', 'refs/heads/master-with-5', 12)
class GerritChangesTest(fake_repos.FakeReposTestBase):
FAKE_REPOS_CLASS = GerritChangesFakeRepo
def setUp(self):
super(GerritChangesTest, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
self.options = BaseGitWrapperTestCase.OptionsObject()
self.url = self.git_base + 'repo_1'
self.mirror = None
def setUpMirror(self):
self.mirror = tempfile.mkdtemp()
git_cache.Mirror.SetCachePath(self.mirror)
self.addCleanup(rmtree, self.mirror)
self.addCleanup(git_cache.Mirror.SetCachePath, None)
def assertCommits(self, commits):
"""Check that all, and only |commits| are present in the current checkout.
"""
for i in commits:
name = os.path.join(self.root_dir, 'commit ' + str(i))
self.assertTrue(os.path.exists(name), 'Commit not found: %s' % name)
all_commits = set(range(1, len(self.FAKE_REPOS.git_hashes['repo_1'])))
for i in all_commits - set(commits):
name = os.path.join(self.root_dir, 'commit ' + str(i))
self.assertFalse(os.path.exists(name), 'Unexpected commit: %s' % name)
def testCanCloneGerritChange(self):
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.revision = 'refs/changes/35/1235/1'
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 6), self.gitrevparse(self.root_dir))
def testCanSyncToGerritChange(self):
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.revision = self.githash('repo_1', 1)
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 1), self.gitrevparse(self.root_dir))
self.options.revision = 'refs/changes/35/1235/1'
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 6), self.gitrevparse(self.root_dir))
def testCanCloneGerritChangeMirror(self):
self.setUpMirror()
self.testCanCloneGerritChange()
def testCanSyncToGerritChangeMirror(self):
self.setUpMirror()
self.testCanSyncToGerritChange()
def testAppliesPatchOnTopOfMasterByDefault(self):
"""Test the default case, where we apply a patch on top of master."""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
# Make sure we don't specify a revision.
self.options.revision = None
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 4), self.gitrevparse(self.root_dir))
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 2, 3, 4, 5, 6])
self.assertEqual(self.githash('repo_1', 4), self.gitrevparse(self.root_dir))
def testCheckoutOlderThanPatchBase(self):
"""Test applying a patch on an old checkout.
We first checkout commit 1, and try to patch refs/changes/35/1235/1, which
contains commits 5 and 6, and is based on top of commit 3.
The final result should contain commits 1, 5 and 6, but not commits 2 or 3.
"""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
# Sync to commit 1
self.options.revision = self.githash('repo_1', 1)
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 1), self.gitrevparse(self.root_dir))
# Apply the change on top of that.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 5, 6])
self.assertEqual(self.githash('repo_1', 1), self.gitrevparse(self.root_dir))
def testCheckoutOriginFeature(self):
"""Tests that we can apply a patch on a branch other than master."""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
# Sync to remote's refs/heads/feature
self.options.revision = 'refs/heads/feature'
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 9), self.gitrevparse(self.root_dir))
# Apply the change on top of that.
scm.apply_patch_ref(
self.url, 'refs/changes/36/1236/1', 'refs/heads/feature', self.options,
file_list)
self.assertCommits([1, 2, 7, 8, 9, 10])
self.assertEqual(self.githash('repo_1', 9), self.gitrevparse(self.root_dir))
def testCheckoutOriginFeatureOnOldRevision(self):
"""Tests that we can apply a patch on an old checkout, on a branch other
than master."""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
# Sync to remote's refs/heads/feature on an old revision
self.options.revision = self.githash('repo_1', 7)
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 7), self.gitrevparse(self.root_dir))
# Apply the change on top of that.
scm.apply_patch_ref(
self.url, 'refs/changes/36/1236/1', 'refs/heads/feature', self.options,
file_list)
# We shouldn't have rebased on top of 2 (which is the merge base between
# remote's master branch and the change) but on top of 7 (which is the
# merge base between remote's feature branch and the change).
self.assertCommits([1, 2, 7, 10])
self.assertEqual(self.githash('repo_1', 7), self.gitrevparse(self.root_dir))
def testCheckoutOriginFeaturePatchBranch(self):
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
# Sync to the hash instead of remote's refs/heads/feature.
self.options.revision = self.githash('repo_1', 9)
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 9), self.gitrevparse(self.root_dir))
    # Apply refs/changes/35/1235/1, created for remote's master branch, on top
    # of remote's feature branch.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
# Commits 5 and 6 are part of the patch, and commits 1, 2, 7, 8 and 9 are
# part of remote's feature branch.
self.assertCommits([1, 2, 5, 6, 7, 8, 9])
self.assertEqual(self.githash('repo_1', 9), self.gitrevparse(self.root_dir))
def testDoesntRebasePatchMaster(self):
"""Tests that we can apply a patch without rebasing it.
"""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.rebase_patch_ref = False
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 4), self.gitrevparse(self.root_dir))
# Apply the change on top of that.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 2, 3, 5, 6])
self.assertEqual(self.githash('repo_1', 4), self.gitrevparse(self.root_dir))
def testDoesntRebasePatchOldCheckout(self):
"""Tests that we can apply a patch without rebasing it on an old checkout.
"""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
# Sync to commit 1
self.options.revision = self.githash('repo_1', 1)
self.options.rebase_patch_ref = False
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 1), self.gitrevparse(self.root_dir))
# Apply the change on top of that.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 2, 3, 5, 6])
self.assertEqual(self.githash('repo_1', 1), self.gitrevparse(self.root_dir))
def testDoesntSoftResetIfNotAskedTo(self):
"""Test that we can apply a patch without doing a soft reset."""
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.reset_patch_ref = False
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 4), self.gitrevparse(self.root_dir))
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 2, 3, 4, 5, 6])
# The commit hash after cherry-picking is not known, but it must be
# different from what the repo was synced at before patching.
self.assertNotEqual(self.githash('repo_1', 4),
self.gitrevparse(self.root_dir))
def testRecoversAfterPatchFailure(self):
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.revision = 'refs/changes/34/1234/1'
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 5), self.gitrevparse(self.root_dir))
# Checkout 'refs/changes/34/1234/1' modifies the 'change' file, so trying to
# patch 'refs/changes/36/1236/1' creates a patch failure.
with self.assertRaises(subprocess2.CalledProcessError) as cm:
scm.apply_patch_ref(
self.url, 'refs/changes/36/1236/1', 'refs/heads/master', self.options,
file_list)
self.assertEqual(cm.exception.cmd[:2], ['git', 'cherry-pick'])
self.assertIn(b'error: could not apply', cm.exception.stderr)
# Try to apply 'refs/changes/35/1235/1', which doesn't have a merge
# conflict.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 2, 3, 5, 6])
self.assertEqual(self.githash('repo_1', 5), self.gitrevparse(self.root_dir))
def testIgnoresAlreadyMergedCommits(self):
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.revision = 'refs/heads/master-with-5'
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 12),
self.gitrevparse(self.root_dir))
# When we try 'refs/changes/35/1235/1' on top of 'refs/heads/feature',
# 'refs/changes/34/1234/1' will be an empty commit, since the changes were
# already present in the tree as commit 11.
# Make sure we deal with this gracefully.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/feature', self.options,
file_list)
self.assertCommits([1, 2, 3, 5, 6, 12])
self.assertEqual(self.githash('repo_1', 12),
self.gitrevparse(self.root_dir))
def testRecoversFromExistingCherryPick(self):
scm = gclient_scm.GitWrapper(self.url, self.root_dir, '.')
file_list = []
self.options.revision = 'refs/changes/34/1234/1'
scm.update(self.options, None, file_list)
self.assertEqual(self.githash('repo_1', 5), self.gitrevparse(self.root_dir))
# Checkout 'refs/changes/34/1234/1' modifies the 'change' file, so trying to
# cherry-pick 'refs/changes/36/1236/1' raises an error.
scm._Run(['fetch', 'origin', 'refs/changes/36/1236/1'], self.options)
with self.assertRaises(subprocess2.CalledProcessError) as cm:
scm._Run(['cherry-pick', 'FETCH_HEAD'], self.options)
self.assertEqual(cm.exception.cmd[:2], ['git', 'cherry-pick'])
# Try to apply 'refs/changes/35/1235/1', which doesn't have a merge
# conflict.
scm.apply_patch_ref(
self.url, 'refs/changes/35/1235/1', 'refs/heads/master', self.options,
file_list)
self.assertCommits([1, 2, 3, 5, 6])
self.assertEqual(self.githash('repo_1', 5), self.gitrevparse(self.root_dir))
if __name__ == '__main__':
level = logging.DEBUG if '-v' in sys.argv else logging.FATAL
logging.basicConfig(
level=level,
format='%(asctime).19s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s')
unittest.main()
# vim: ts=2:sw=2:tw=80:et:
| 35.610119
| 80
| 0.647367
|
6df241ee328db0bb0b89d2023ec69c8f60af9c79
| 1,480
|
py
|
Python
|
tacker/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 116
|
2015-10-18T02:57:08.000Z
|
2022-03-15T04:09:18.000Z
|
tacker/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 6
|
2016-11-07T22:15:54.000Z
|
2021-05-09T06:13:08.000Z
|
tacker/db/migration/alembic_migrations/versions/f5c1c3b0f6b4_set_default_value_for_deleted_at.py
|
takahashi-tsc/tacker
|
a0ae01a13dcc51bb374060adcbb4fd484ab37156
|
[
"Apache-2.0"
] | 166
|
2015-10-20T15:31:52.000Z
|
2021-11-12T08:39:49.000Z
|
# Copyright 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""set default value for deleted_at
Revision ID: f5c1c3b0f6b4
Revises: 31acbaeb8299
Create Date: 2017-06-23 03:03:12.200270
"""
# flake8: noqa: E402
# revision identifiers, used by Alembic.
revision = 'f5c1c3b0f6b4'
down_revision = '31acbaeb8299'
from alembic import op
from datetime import datetime
def upgrade(active_plugins=None, options=None):
op.execute(("UPDATE vnfd set deleted_at='%s'"
" WHERE deleted_at is NULL") % datetime.min)
op.execute(("UPDATE vnf set deleted_at='%s'"
" WHERE deleted_at is NULL") % datetime.min)
op.execute(("UPDATE vims set deleted_at='%s'"
" WHERE deleted_at is NULL") % datetime.min)
op.execute(("UPDATE ns set deleted_at='%s'"
" WHERE deleted_at is NULL") % datetime.min)
op.execute(("UPDATE nsd set deleted_at='%s'"
" WHERE deleted_at is NULL") % datetime.min)
| 30.204082
| 78
| 0.70473
|
2ddb8fd7fcb206b0c97ffbe520801eb6898a69e3
| 3,401
|
py
|
Python
|
lib/fontv/settings.py
|
source-foundry/font-v
|
590c9f540cf73ffccc2a1907b186d16b296729e5
|
[
"MIT"
] | 14
|
2017-09-15T23:29:03.000Z
|
2021-12-06T12:48:39.000Z
|
lib/fontv/settings.py
|
source-foundry/font-v
|
590c9f540cf73ffccc2a1907b186d16b296729e5
|
[
"MIT"
] | 135
|
2017-09-05T13:33:29.000Z
|
2022-03-28T08:03:02.000Z
|
lib/fontv/settings.py
|
source-foundry/font-v
|
590c9f540cf73ffccc2a1907b186d16b296729e5
|
[
"MIT"
] | 6
|
2018-10-23T14:49:17.000Z
|
2021-12-01T22:47:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Library Name
# ------------------------------------------------------------------------------
lib_name = "font-v"
# ------------------------------------------------------------------------------
# Version Number
# ------------------------------------------------------------------------------
major_version = "1"
minor_version = "0"
patch_version = "5"
# ------------------------------------------------------------------------------
# Help String
# ------------------------------------------------------------------------------
HELP = """====================================================
font-v
Copyright 2018 Christopher Simpkins
MIT License
Source: https://github.com/source-foundry/font-v
====================================================
font-v is a font version string reporting and modification tool for ttf and otf fonts. It is built with the libfv library and supports the OpenFV semantic font versioning specification.
USAGE:
Include a subcommand and desired options in your command line request:
font-v [subcommand] (options) [font file path 1] ([font file path ...])
Subcommands and options:
report - report OpenType name table ID 5 and head table fontRevision records
--dev - include all name table ID 5 x platformID records in report
write - write version number to head table fontRevision records and
version string to name table ID 5 records. The following options
can be used to modify the version string write:
head fontRevision + name ID 5 option:
--ver=[version #] - change version number to `version #` definition
name ID 5 options:
--dev - add development status metadata (mutually exclusive with --rel)
--rel - add release status metadata (mutually exclusive with --dev)
--sha1 - add git commit sha1 short hash state metadata
NOTES:
The write subcommand --dev and --rel flags are mutually exclusive. Include up to one of these options.
For platforms that treat the period as a special shell character, an underscore or dash glyph can be used in place of a period to define the version number on the command line with the `--ver=[version #]` option. This means that 2.001 can be defined with any of the following:
$ font-v write --ver=2.001
$ font-v write --ver=2_001
$ font-v write --ver=2-001
You can include version number, status, and state options in the same request to make all of these modifications simultaneously.
The write subcommand modifies all nameID 5 records identified in the OpenType name table of the font (i.e. across all platformID).
font-v and the underlying libfv library follow the OpenFV semantic font versioning specification. This specification can be viewed at https://github.com/openfv/openfv.
"""
# ------------------------------------------------------------------------------
# Version String
# ------------------------------------------------------------------------------
VERSION = "font-v v" + major_version + "." + minor_version + "." + patch_version
# ------------------------------------------------------------------------------
# Usage String
# ------------------------------------------------------------------------------
USAGE = """
font-v [subcommand] (options) [font file path 1] ([font file path ...])
"""
| 41.47561
| 277
| 0.534549
|
7a309190f32e5b8543148e0ca45aff47bb3af6a9
| 95
|
py
|
Python
|
orderadmin/apps.py
|
jackyicu/django-orderadmin
|
a634c9f447fb8e352077317d9d3f43e40488e47b
|
[
"BSD-3-Clause"
] | null | null | null |
orderadmin/apps.py
|
jackyicu/django-orderadmin
|
a634c9f447fb8e352077317d9d3f43e40488e47b
|
[
"BSD-3-Clause"
] | null | null | null |
orderadmin/apps.py
|
jackyicu/django-orderadmin
|
a634c9f447fb8e352077317d9d3f43e40488e47b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class OrderadminConfig(AppConfig):
name = 'orderadmin'
| 15.833333
| 34
| 0.768421
|
7ce8dc272c5411f42bdc279378fbb4917b4e00a3
| 1,256
|
py
|
Python
|
release/scripts/addons/add_camera_rigs/composition_guides_menu.py
|
vic3t3chn0/Bforartists
|
7c54a60dd7aa568e20ae7e3778dfef993b61b7b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-01-18T22:13:24.000Z
|
2020-01-18T22:13:24.000Z
|
release/scripts/addons/add_camera_rigs/composition_guides_menu.py
|
vic3t3chn0/Bforartists
|
7c54a60dd7aa568e20ae7e3778dfef993b61b7b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/addons/add_camera_rigs/composition_guides_menu.py
|
vic3t3chn0/Bforartists
|
7c54a60dd7aa568e20ae7e3778dfef993b61b7b5
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
import bpy
from bpy.types import Menu
class ADD_CAMERA_RIGS_MT_composition_guides_menu(Menu):
bl_label = "Composition Guides"
bl_idname = "ADD_CAMERA_RIGS_MT_composition_guides_menu"
def draw(self, context):
layout = self.layout
activeCameraName = bpy.context.active_object.children[0].name
cam = bpy.data.cameras[bpy.data.objects[activeCameraName].data.name]
layout.prop(cam, "show_safe_areas")
layout.row().separator()
layout.prop(cam, "show_composition_center")
layout.prop(cam, "show_composition_center_diagonal")
layout.prop(cam, "show_composition_golden")
layout.prop(cam, "show_composition_golden_tria_a")
layout.prop(cam, "show_composition_golden_tria_b")
layout.prop(cam, "show_composition_harmony_tri_a")
layout.prop(cam, "show_composition_harmony_tri_b")
layout.prop(cam, "show_composition_thirds")
def draw_item(self, context):
layout = self.layout
        layout.menu(ADD_CAMERA_RIGS_MT_composition_guides_menu.bl_idname)
def register():
bpy.utils.register_class(ADD_CAMERA_RIGS_MT_composition_guides_menu)
def unregister():
bpy.utils.unregister_class(ADD_CAMERA_RIGS_MT_composition_guides_menu)
if __name__ == "__main__":
register()
| 29.904762
| 76
| 0.730892
|
e85a44cf4455afb350e84052870e9a485db0fbee
| 3,350
|
py
|
Python
|
venv/lib/python3.8/site-packages/awscli/customizations/cloudformation/yamlhelper.py
|
sr9dc/DS_Systems_Project_2
|
0b348c1dd300756f732b4ce13e04239036dc601a
|
[
"MIT"
] | 4
|
2022-01-07T13:37:33.000Z
|
2022-03-31T03:21:17.000Z
|
venv/lib/python3.8/site-packages/awscli/customizations/cloudformation/yamlhelper.py
|
sr9dc/DS_Systems_Project_2
|
0b348c1dd300756f732b4ce13e04239036dc601a
|
[
"MIT"
] | 1
|
2022-01-27T04:21:58.000Z
|
2022-01-27T04:21:58.000Z
|
venv/lib/python3.8/site-packages/awscli/customizations/cloudformation/yamlhelper.py
|
sr9dc/DS_Systems_Project_2
|
0b348c1dd300756f732b4ce13e04239036dc601a
|
[
"MIT"
] | null | null | null |
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.compat import json
from botocore.compat import OrderedDict
import yaml
from yaml.resolver import ScalarNode, SequenceNode
from awscli.compat import six
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
YAML constructor to parse CloudFormation intrinsics.
    This will return a dictionary with the key being the intrinsic name
"""
# Get the actual tag name excluding the first exclamation
tag = node.tag[1:]
    # Some intrinsic functions do not support the "Fn::" prefix
prefix = "Fn::"
if tag in ["Ref", "Condition"]:
prefix = ""
cfntag = prefix + tag
if tag == "GetAtt" and isinstance(node.value, six.string_types):
# ShortHand notation for !GetAtt accepts Resource.Attribute format
# while the standard notation is to use an array
# [Resource, Attribute]. Convert shorthand to standard format
value = node.value.split(".", 1)
elif isinstance(node, ScalarNode):
# Value of this node is scalar
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
# Value of this node is an array (Ex: [1,2])
value = loader.construct_sequence(node)
else:
        # Value of this node is a mapping (ex: {foo: bar})
value = loader.construct_mapping(node)
return {cfntag: value}
def _dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def yaml_dump(dict_to_dump):
"""
Dumps the dictionary as a YAML document
:param dict_to_dump:
:return:
"""
FlattenAliasDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(
dict_to_dump,
default_flow_style=False,
Dumper=FlattenAliasDumper,
)
def _dict_constructor(loader, node):
# Necessary in order to make yaml merge tags work
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
class SafeLoaderWrapper(yaml.SafeLoader):
"""Isolated safe loader to allow for customizations without global changes.
"""
pass
def yaml_parse(yamlstr):
"""Parse a yaml string"""
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(yamlstr, object_pairs_hook=OrderedDict)
except ValueError:
loader = SafeLoaderWrapper
loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
_dict_constructor)
loader.add_multi_constructor("!", intrinsics_multi_constructor)
return yaml.load(yamlstr, loader)
class FlattenAliasDumper(yaml.SafeDumper):
def ignore_aliases(self, data):
return True
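# --- Illustrative usage (not part of the upstream module) ---------------------
# A minimal sketch of the round trip; the template snippet is invented.
if __name__ == "__main__":
    parsed = yaml_parse("Value: !GetAtt MyBucket.Arn")
    # parsed == OrderedDict([('Value', {'Fn::GetAtt': ['MyBucket', 'Arn']})])
    print(yaml_dump(parsed))  # re-serialises using the OrderedDict representer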
| 31.308411
| 79
| 0.697015
|
3715cb36e6511a4f1793be1aad5112b33225ce16
| 4,888
|
py
|
Python
|
models/guided_pix2pix_model.py
|
MHC-F2V-Research/Image-Reconstruction-Ref2
|
0a124365fc14708bca092f68205987885c91212a
|
[
"BSD-3-Clause"
] | 197
|
2019-10-24T19:51:17.000Z
|
2022-01-29T09:23:49.000Z
|
models/guided_pix2pix_model.py
|
andrewjong/Guided-pix2pix
|
0c6a7b5fde50ad7ea4fb20a6136fc6cb6c4e5542
|
[
"BSD-3-Clause"
] | 10
|
2019-10-26T23:56:47.000Z
|
2021-09-03T13:06:53.000Z
|
models/guided_pix2pix_model.py
|
andrewjong/Guided-pix2pix
|
0c6a7b5fde50ad7ea4fb20a6136fc6cb6c4e5542
|
[
"BSD-3-Clause"
] | 24
|
2019-10-25T06:36:30.000Z
|
2022-02-10T15:07:46.000Z
|
import torch
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from util import util
class GuidedPix2PixModel(BaseModel):
def name(self):
return 'GuidedPix2PixModel'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
self.opt = opt
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.use_GAN = opt.use_GAN
if self.use_GAN:
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
else:
self.loss_names = ['G_L1']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
if self.use_GAN:
self.model_names = ['G', 'D']
else:
self.model_names = ['G']
else: # during test time, only load Gs
self.model_names = ['G']
# load/define networks
self.netG = networks.define_G(input_nc=opt.input_nc, guide_nc=opt.guide_nc, output_nc=opt.output_nc, ngf=opt.ngf, netG=opt.netG, n_layers=opt.n_layers, norm=opt.norm, init_type=opt.init_type, init_gain=opt.init_gain, gpu_ids=self.gpu_ids)
        if self.isTrain and self.use_GAN:
use_sigmoid = opt.no_lsgan
self.netD = networks.define_D(input_nc=opt.input_nc + opt.guide_nc + opt.output_nc, ndf=opt.ndf, netD=opt.netD, n_layers_D=opt.n_layers_D,
norm=opt.norm, use_sigmoid=use_sigmoid, init_type=opt.init_type, init_gain=opt.init_gain, gpu_ids=self.gpu_ids)
if self.isTrain:
self.fake_AB_pool = ImagePool(0)
# define loss functions
if self.use_GAN:
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers
self.optimizers = []
self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
if self.use_GAN:
self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
self.real_A = input['A'].to(self.device)
self.real_B = input['B'].to(self.device)
self.guide = input['guide'].to(self.device)
def forward(self):
self.fake_B = self.netG(self.real_A, self.guide)
def get_output(self):
return self.fake_B
def get_class(self):
return self.class_B
def get_current_data(self):
return {'A':self.real_A, 'guide':self.guide, 'B':self.real_B, 'output':self.fake_B}
def backward_D(self):
# Fake
# stop backprop to the generator by detaching fake_B
fake_AB = self.fake_AB_pool.query(torch.cat((self.real_A, self.guide, self.fake_B), 1))
pred_fake = self.netD(fake_AB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
real_AB = torch.cat((self.real_A, self.guide, self.real_B), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# Combined loss
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
if self.use_GAN:
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.guide, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
self.loss_G = self.loss_G_GAN + self.loss_G_L1
else:
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B)
self.loss_G = self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward()
if self.use_GAN:
# update D
self.set_requires_grad(self.netD, True)
self.optimizer_D.zero_grad()
self.backward_D()
self.optimizer_D.step()
# update G
self.set_requires_grad(self.netD, False)
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
else:
# update G
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
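# Illustrative training-loop sketch (comments only; `opt` and `loader` are
# assumed to be built by the surrounding options/data pipeline):
#   model = GuidedPix2PixModel()
#   model.initialize(opt)
#   for data in loader:                # each item must provide 'A', 'B', 'guide'
#       model.set_input(data)
#       model.optimize_parameters()    # forward + D update (if GAN) + G update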
| 38.793651
| 246
| 0.596768
|
0b8e0dda873a634c155a3abd50ce71b567367d29
| 4,633
|
py
|
Python
|
openpose/model/roi_heads/uv_head/inference.py
|
leehsiu/pyopenpose
|
c4feef04a9e563fb91e18f745bc187c6f2aeb72c
|
[
"MIT"
] | null | null | null |
openpose/model/roi_heads/uv_head/inference.py
|
leehsiu/pyopenpose
|
c4feef04a9e563fb91e18f745bc187c6f2aeb72c
|
[
"MIT"
] | null | null | null |
openpose/model/roi_heads/uv_head/inference.py
|
leehsiu/pyopenpose
|
c4feef04a9e563fb91e18f745bc187c6f2aeb72c
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
class UVPostProcessor(nn.Module):
def __init__(self, keypointer=None):
super(UVPostProcessor, self).__init__()
self.keypointer = keypointer
def forward(self, x, boxes):
mask_prob = x
if self.keypointer:
mask_prob = self.keypointer(x, boxes)
assert len(boxes) == 1, "Only non-batched inference supported for now"
results = []
bbox = BoxList(boxes[0].bbox, boxes[0].size, mode="xyxy")
for field in boxes[0].fields():
bbox.add_field(field, boxes[0].get_field(field))
bbox.add_field("uv_index", mask_prob[0])
bbox.add_field("uv_I", mask_prob[1])
bbox.add_field("uv_U", mask_prob[2])
bbox.add_field("uv_V", mask_prob[3])
results.append(bbox)
return results
# TODO remove and use only the Keypointer
import numpy as np
import cv2
def heatmaps_to_uvs(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = 0 # cfg.KRCNN.INFERENCE_MIN_SIZE
outputs = []
for i in range(rois.shape[0]):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
            roi_map_width = int(widths_ceil[i])
            roi_map_height = int(heights_ceil[i])
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height), interpolation=cv2.INTER_CUBIC
)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
outputs.append(roi_map)
return outputs
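# Illustrative shapes (hypothetical inputs): with maps of shape
# (N, C, heatmap_h, heatmap_w) and rois of shape (N, 4) in xyxy pixel
# coordinates, heatmaps_to_uvs returns a list of N arrays, each of shape
# (C, ceil(box_height), ceil(box_width)), resized with bicubic interpolation.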
from openpose.structures.bounding_box import BoxList
from openpose.structures.keypoint import PersonKeypoints
class UVInferencer(object):
"""
    Converts predicted UV heatmaps (index, I, U, V) into per-box maps at the
    locations and sizes given by the bounding boxes
"""
def __init__(self, padding=0):
self.padding = padding
def __call__(self, masks, boxes):
# TODO do this properly
if isinstance(boxes, BoxList):
boxes = [boxes]
assert len(boxes) == 1
#index, I, U, V
result = []
uv_index_list = heatmaps_to_uvs(
masks[0].detach().cpu().numpy(), boxes[0].bbox.detach().cpu().numpy()
)
uv_index = []
for index_val in uv_index_list:
index_val = np.argmax(index_val,axis=0)
uv_index.append(index_val)
uv_I_list = heatmaps_to_uvs(
masks[1].detach().cpu().numpy(), boxes[0].bbox.detach().cpu().numpy()
)
uv_I = []
for index_val,I_val in zip(uv_index,uv_I_list):
I_val = np.argmax(I_val,axis=0)
I_val[index_val==0] = 0
uv_I.append(I_val)
#based on uv_index
uv_U_list = heatmaps_to_uvs(
masks[2].detach().cpu().numpy(), boxes[0].bbox.detach().cpu().numpy()
)
uv_V_list = heatmaps_to_uvs(
masks[3].detach().cpu().numpy(), boxes[0].bbox.detach().cpu().numpy()
)
uv_U = []
uv_V = []
for I_val,U_val,V_val in zip(uv_I,uv_U_list,uv_V_list):
uv_U_cond = np.zeros_like(I_val,dtype=np.float32)
uv_V_cond = np.zeros_like(I_val,dtype=np.float32)
for ipart in range(1,25):
uv_U_cond[I_val==ipart] = U_val[ipart][I_val==ipart]
uv_V_cond[I_val==ipart] = V_val[ipart][I_val==ipart]
uv_U.append(uv_U_cond)
uv_V.append(uv_V_cond)
result.append(uv_index)
result.append(uv_I)
result.append(uv_U)
result.append(uv_V)
return result
def make_roi_uv_post_processor(cfg):
uvInferencer = UVInferencer()
uv_post_processor = UVPostProcessor(uvInferencer)
return uv_post_processor
| 31.951724
| 83
| 0.60695
|
4d9970b82413e179ad9199bda5f91245c1fd980c
| 8,879
|
py
|
Python
|
vatreturn.py
|
everknow/vatreturn
|
b35f8a5b5d1ab6993f8ed507b9bd55cf5675d8d1
|
[
"MIT"
] | null | null | null |
vatreturn.py
|
everknow/vatreturn
|
b35f8a5b5d1ab6993f8ed507b9bd55cf5675d8d1
|
[
"MIT"
] | null | null | null |
vatreturn.py
|
everknow/vatreturn
|
b35f8a5b5d1ab6993f8ed507b9bd55cf5675d8d1
|
[
"MIT"
] | null | null | null |
from functools import wraps
import json
import os
import requests
import datetime
from flask import Flask, redirect, url_for
from flask import send_from_directory
from flask import render_template, g
from flask import request
from flask import session
from hmrc_provider import make_hmrc_blueprint, hmrc
from urllib.parse import unquote, quote
import pandas as pd
import logging
app = Flask(__name__, static_url_path='')
app.secret_key = os.environ.get("FLASK_SECRET_KEY", "supersekrit")
app.config["HMRC_OAUTH_CLIENT_ID"] = os.environ.get("HMRC_OAUTH_CLIENT_ID")
app.config["HMRC_OAUTH_CLIENT_SECRET"] = os.environ.get("HMRC_OAUTH_CLIENT_SECRET")
app.config["HMRC_API_HOST"] = os.environ.get("HMRC_API_HOST")
hmrc_bp = make_hmrc_blueprint(
api_host=app.config['HMRC_API_HOST'],
scope='read:vat write:vat',
client_id=app.config["HMRC_OAUTH_CLIENT_ID"],
client_secret=app.config["HMRC_OAUTH_CLIENT_SECRET"],
redirect_to="obligations"
)
app.register_blueprint(
hmrc_bp,
url_prefix="/login",)
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not hmrc.authorized:
return redirect(url_for("hmrc.login"))
else:
if 'hmrc_vat_number' not in session:
return redirect(url_for('get_vat_number', next=request.url))
return f(*args, **kwargs)
return decorated_function
@app.route("/privacy")
def privacy():
return render_template('privacy.html')
@app.route("/making_tax_digital")
def making_tax_digital():
return render_template('making_tax_digital.html')
@app.route("/tandc")
def tandc():
return render_template('tandc.html')
@app.route("/get_vat_number", methods=('GET', 'POST',))
def get_vat_number():
if request.method == 'GET':
return render_template('get_vat_number.html')
elif request.method == 'POST':
session['hmrc_vat_number'] = request.form['hmrc_vat_number']
return redirect(request.args.get('next'))
@app.route("/")
def index():
return render_template('index.html')
def get_fraud_headers():
# These should all be in the request, mostly because they've been
# injected into any form as hidden fields by javascript
headers = {
'Gov-Client-Connection-Method': 'WEB_APP_VIA_SERVER',
'Gov-Client-Timezone': request.cookies.get(
'user_timezone', None),
'Gov-Client-Window-Size': request.cookies.get(
'client_window', None),
'Gov-Client-Browser-JS-User-Agent': unquote( request.cookies.get(
'client_user_agent', None) ),
'Gov-Client-Browser-Plugins': ",".join(map(quote, unquote(request.cookies.get(
'client_browser_plugins', None)[:-1]).split(","))),
'Gov-Client-Browser-Do-Not-Track': request.cookies.get(
'client_do_not_track', None),
'Gov-Client-Screens': request.cookies.get(
'client_screens', None),
'Gov-Client-Local-IPs-Timestamp': request.cookies.get(
'client-local-timestamp', None),
'Gov-Client-Device-ID': os.environ.get("DEVICE_ID"), # was request.cookies.get('device_id', None),
'Gov-Vendor-Version': 'vatreturn-frontend=1.0&vatreturn-backend=1.0',
'Gov-Client-User-IDs': "vatreturn="+os.environ.get("USER_ID"),
'Gov-Client-Local-IPs': os.environ.get("LOCAL_IP"),
'Gov-Vendor-Product-Name': 'vatreturn',
# 'Gov-Vendor-Public-Port': None,
# 'Gov-Vendor-Public-IP': None, # hosted in Heroku, will change
# 'Gov-Client-Public-IP': request.cookies.get(
# 'public_ip', None),
# 'Gov-Client-Public-IP-Timestamp': request.cookies.get(
# 'client-local-timestamp', None),
}
return dict([(k, v) for k, v in headers.items() if v])
def do_action(action, endpoint, params={}, data={}):
url = "/organisations/vat/{}/{}".format(
session['hmrc_vat_number'], endpoint)
if action == 'get':
# logging.warn(url)
response = hmrc.get(url, params=params, headers=get_fraud_headers())
elif action == 'post':
response = hmrc.post(url, json=data, headers=get_fraud_headers())
if not response.ok:
try:
error = response.json()
except json.decoder.JSONDecodeError:
error = response.text
return {'error': error}
else:
return response.json()
@app.route("/obligations")
@login_required
def obligations(show_all=False):
if show_all:
today = datetime.date.today()
from_date = today - datetime.timedelta(days=365*2)
to_date = today
params = {
'from': from_date.strftime("%Y-%m-%d"),
'to': to_date.strftime("%Y-%m-%d")
}
else:
params = {'status': 'O'}
# uncomment the following 3 lines to debug the fraud headers
# logging.warn(json.dumps(get_fraud_headers(), indent = 4))
# r = hmrc.get('test/fraud-prevention-headers/validate', params={}, headers=get_fraud_headers())
# logging.warn(json.dumps(r.json(), indent = 4))
# uncomment the following 2 lines to retrieve a submitted return
# returns = do_action('get', 'returns/18A1', {})
# logging.warn(json.dumps(returns, indent = 4))
obligations = do_action('get', 'obligations', params)
logging.warn(json.dumps(obligations, indent = 4))
if 'error' in obligations:
g.error = obligations['error']
else:
g.obligations = obligations['obligations']
return render_template('obligations.html')
def return_data(period_key, period_end, vat_csv):
# logging.warn(vat_csv)
df = pd.read_csv(vat_csv)
assert list(df.columns) == ["VAT period", "Sales", "Purchases", "VAT rate"]
period = df[df["VAT period"] == period_end]
sales = float(period["Sales"].iloc[0])
purchases = float(period["Purchases"].iloc[0])
vat_rate = float(period["VAT rate"].iloc[0]) / 100
vat_sales = round(sales * vat_rate, 2)
vat_reclaimed = round(purchases * vat_rate, 2)
box_1 = vat_sales # box_1, vat due on sales
box_2 = 0 # vat due on acquisitions
box_3 = box_1 + box_2 # total vat due - calculated: Box1 + Box2
box_4 = vat_reclaimed # vat reclaimed for current period
box_5 = abs(box_3 - box_4) # net vat due (amount to be paid). Calculated: take the figures from Box 3 and Box 4. Deduct the smaller figure from the larger one and use the difference
box_6 = sales # total value sales ex vat
box_7 = purchases # total value purchases ex vat
box_8 = 0 # total value goods supplied ex vat
box_9 = 0 # total acquisitions ex vat
data = {
"periodKey": period_key,
"vatDueSales": box_1,
"vatDueAcquisitions": box_2,
"totalVatDue": box_3,
"vatReclaimedCurrPeriod": box_4,
"netVatDue": box_5,
"totalValueSalesExVAT": box_6,
"totalValuePurchasesExVAT": box_7,
"totalValueGoodsSuppliedExVAT": box_8,
"totalAcquisitionsExVAT": box_9,
"finalised": True # declaration
}
return data
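# Worked example (invented figures): a CSV row with Sales=1000.00,
# Purchases=400.00 and VAT rate=20 yields box_1=200.0 (VAT due on sales),
# box_4=80.0 (VAT reclaimed), box_3=200.0, box_5=120.0 (net VAT due),
# box_6=1000.0, box_7=400.0 and boxes 2/8/9 equal to 0.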
@app.route("/<string:period_key>/preview")
@login_required
def preview_return(period_key):
g.period_key = period_key
g.vat_csv = request.args.get('vat_csv', '')
g.period_end = request.args.get('period_end', '')
if g.vat_csv:
g.data = return_data(g.period_key, g.period_end, g.vat_csv)
return render_template('preview_return.html')
@app.route("/<string:period_key>/send", methods=('POST',))
@login_required
def send_return(period_key):
# logging.warn(period_key)
confirmed = request.form.get('complete', None)
vat_csv = request.form.get('vat_csv')
g.period_end = request.form.get('period_end', '')
if not confirmed:
return redirect(url_for(
"preview_return",
period_key=period_key,
period_end=g.period_end,
confirmation_error=True))
else:
g.data = return_data(period_key, g.period_end, vat_csv)
g.response = do_action('post', 'returns', data=g.data)
return render_template('send_return.html')
@app.route("/logout")
def logout():
del(session['hmrc_oauth_token'])
del(session['hmrc_vat_number'])
return redirect(url_for("index"))
def create_test_user():
url = '/create-test-user/individuals'
api_host=app.config['HMRC_API_HOST']
return requests.post(
api_host + url,
data={
"serviceNames": [
"national-insurance",
"self-assessment",
"mtd-income-tax",
"customs-services",
"mtd-vat"
]
})
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('js', path)
@app.route('/img/<path:path>')
def send_img(path):
return send_from_directory('img', path)
| 34.15
| 186
| 0.644667
|
38506ad17eb28d55a72a0d603883e69598d033bb
| 3,244
|
py
|
Python
|
pytest_splunk_addon_ui_smartx/components/controls/oauth_select.py
|
artemrys/addon-factory-smartx-ui-test-library
|
a0ea68a2f90f5662eac383b74043f69e01830251
|
[
"Apache-2.0"
] | 1
|
2021-03-29T07:03:15.000Z
|
2021-03-29T07:03:15.000Z
|
pytest_splunk_addon_ui_smartx/components/controls/oauth_select.py
|
artemrys/addon-factory-smartx-ui-test-library
|
a0ea68a2f90f5662eac383b74043f69e01830251
|
[
"Apache-2.0"
] | 181
|
2020-12-15T19:31:07.000Z
|
2022-03-18T08:06:06.000Z
|
pytest_splunk_addon_ui_smartx/components/controls/oauth_select.py
|
splunk/addon-factory-smartx-ui-test-library
|
d1bb8d0ce728d53d3c15425c3b5a25e2e526d80e
|
[
"Apache-2.0"
] | 2
|
2022-02-01T18:39:37.000Z
|
2022-03-27T16:51:40.000Z
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from ..base_component import Selector
from .base_control import BaseControl
class OAuthSelect(BaseControl):
"""
Entity-Component: OAuthSelect
OAuthSelect Javascript framework: OAuthSelect
A dropdown which can select only one value
"""
def __init__(self, browser, container, searchable=True):
"""
:param browser: The selenium webdriver
:param container: The locator of the container where the control is located in.
"""
super().__init__(browser, container)
self.elements.update(
{
"values": Selector(select=container.select + ' [data-test="option"]'),
"dropdown": Selector(select=container.select + " .dropdownBox"),
}
)
def select(self, value):
"""
        Selects the value within the select dropdown
:param value: the value to select
:return: Bool if successful in selection, else raises an error
"""
self.dropdown.click()
popoverid = "#" + self.dropdown.get_attribute("data-test-popover-id")
self.elements.update(
{
"values": Selector(
select=popoverid
+ ' [data-test="option"]:not([data-test-selected="true"]) [data-test="label"]'
)
}
)
for each in self.get_elements("values"):
if each.text.strip().lower() == value.lower():
each.click()
return True
else:
raise ValueError("{} not found in select list".format(value))
def get_value(self):
"""
Gets the selected value
        :return: Str The selected value within the dropdown, or an empty string if nothing is selected
"""
try:
element = self.get_element("dropdown")
return element.get_attribute("data-test-value")
except:
return ""
def list_of_values(self):
"""
        Gets the list of values from the single select
:returns: List of options from the single select
"""
selected_val = self.get_value()
self.container.click()
first_element = None
list_of_values = []
popoverid = "#" + self.dropdown.get_attribute("data-test-popover-id")
self.elements.update(
{"values": Selector(select=popoverid + ' [data-test="option"]')}
)
for each in self.get_elements("values"):
list_of_values.append(each.text.strip())
return list_of_values
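# Illustrative usage sketch (the selector string and option label below are
# assumptions, not values taken from a real add-on page):
#   oauth = OAuthSelect(browser, Selector(select='[data-test="oauth"]'))
#   oauth.select("Basic Authentication")   # clicks the matching option
#   current = oauth.get_value()            # reads data-test-value off the box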
| 32.44
| 98
| 0.609125
|
9693add9535f0d8729ba6cd2d4781a323ba18de7
| 1,324
|
py
|
Python
|
web_dynamic/4-hbnb.py
|
JoseMarulanda/AirBnB_clone_v4
|
939bb49decc49194fcd91e585f03ca85a27d9a5e
|
[
"MIT"
] | null | null | null |
web_dynamic/4-hbnb.py
|
JoseMarulanda/AirBnB_clone_v4
|
939bb49decc49194fcd91e585f03ca85a27d9a5e
|
[
"MIT"
] | null | null | null |
web_dynamic/4-hbnb.py
|
JoseMarulanda/AirBnB_clone_v4
|
939bb49decc49194fcd91e585f03ca85a27d9a5e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
import uuid
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/4-hbnb')
def hbnb_filters(the_id=None):
"""
    handles request to custom template with states, cities & amenities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
return render_template('4-hbnb.html',
cache_id=uuid.uuid4(),
states=states,
amens=amens,
places=places,
users=users)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
| 28.170213
| 75
| 0.608761
|
88f8ff30d10e8cb32963a1c312640f02dd4e0998
| 4,645
|
py
|
Python
|
perftest/spreadbenchmark.py
|
wendazhou/finufft
|
ba52c1d823186aac3ae33341921136b9ed3b6422
|
[
"Apache-2.0"
] | 134
|
2018-08-23T02:05:01.000Z
|
2022-03-27T05:13:20.000Z
|
perftest/spreadbenchmark.py
|
wendazhou/finufft
|
ba52c1d823186aac3ae33341921136b9ed3b6422
|
[
"Apache-2.0"
] | 157
|
2018-09-14T16:36:31.000Z
|
2022-03-31T23:42:44.000Z
|
perftest/spreadbenchmark.py
|
wendazhou/finufft
|
ba52c1d823186aac3ae33341921136b9ed3b6422
|
[
"Apache-2.0"
] | 66
|
2018-09-25T15:50:59.000Z
|
2022-03-21T00:42:28.000Z
|
import commands
import re
import sys
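# Usage sketch (Python 2 script; the commit identifiers are placeholders):
#   python spreadbenchmark.py <commit_or_branch_A> <commit_or_branch_B>
# Either argument may be the literal string "local" to benchmark the current
# working tree without checking anything out (see checkoutandmake/restore).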
# Test set
tests = []
#tests.append({"dim":1, "M":1e8, "N":1e8, "tol":1e-3})
#tests.append({"dim":1, "M":1e8, "N":1e8, "tol":1e-8})
#tests.append({"dim":1, "M":1e8, "N":1e8, "tol":1e-15})
#tests.append({"dim":3, "M":1e7, "N":1e7, "tol":1e-3})
#tests.append({"dim":3, "M":1e7, "N":1e7, "tol":1e-8})
tests.append({"dim":3, "M":1e5, "N":1e5, "tol":1e-15})
tests.append({"dim":3, "M":1e6, "N":1e6, "tol":1e-15})
tests.append({"dim":3, "M":1e7, "N":1e7, "tol":1e-15})
# Flags to send
flags = "1"
# Make flags (eg OMP=OFF)
makeflags = ""
# Command template
cmdtemplate = "test/spreadtestnd %(dim)d %(M)g %(N)g %(tol)g " + flags
# Regexps to use
spreadre = re.compile(r'dir=1.*\n.*\s(?P<speed>\d+[\d\.e+-]*) pts/s.*\n.*rel err.*\s(?P<err>\d+[\d\.e+-]*)')
interpre = re.compile(r'dir=2.*\n.*\s(?P<speed>\d+[\d\.e+-]*) pts/s.*\n.*rel err.*\s(?P<err>\d+[\d\.e+-]*)')
commit_A = sys.argv[1]
commit_B = sys.argv[2]
print "* Comparing commits '%s' and '%s'" % (commit_A, commit_B)
# Find out where we are, so we can come back
gitout = commands.getoutput("git status -b --porcelain")
pos = re.match("## (.+)\n", gitout).group(1)
if re.match("HEAD ", pos):
home = commands.getoutput("git rev-parse HEAD")
print "* Seems we are currently in detached head, will return to commit %s" % home
else:
home = re.match("([^ \.]+)", pos).group(1)
print "* Will return to branch %s" % home
# Command runner
def runCommand(cmd):
print "> " + cmd
status, output = commands.getstatusoutput(cmd)
print output
return output
# Test runner
def runTests():
print "Running tests..."
results = []
for params in tests:
cmd = cmdtemplate % params
# Best of 3
interp_speed = 0
interp_err = 0
spread_speed = 0
spread_err = 0
for i in [1,2,3]:
output = runCommand(cmd).rstrip()
ms = spreadre.search(output)
mi = interpre.search(output)
interp_speed = max(interp_speed, mi.group("speed"))
interp_err = max(interp_err, mi.group("err"))
spread_speed = max(spread_speed, ms.group("speed"))
spread_err = max(spread_err, ms.group("err"))
results.append({"cmd":cmd,
"interp_speed":interp_speed,
"interp_err":interp_err,
"spread_speed":spread_speed,
"spread_err":spread_err })
return results
# Code checkout machinery
def checkoutandmake(commit):
if commit == "local":
# Do nothin, just make it
pass
else:
# Stash current source tree and check out commit
print "Stashing changes and checking out %s..." % commit
runCommand("git stash save")
runCommand("git checkout %s" % commit)
print "Making..."
print commands.getoutput("make clean test/spreadtestnd " + makeflags)
def restore(commit, home):
if commit == "local":
# We just tested local, so do nothing
pass
else:
# Return home and pop stash
print "Checking out %s and popping stash..." % home
runCommand("git checkout %s" % home)
runCommand("git stash pop")
# Run tests
print "* Testing %s" % commit_A
checkoutandmake(commit_A)
res_A = runTests()
restore(commit_A, home)
print "* Testing %s" % commit_B
checkoutandmake(commit_B)
res_B = runTests()
restore(commit_B, home)
# Present results
format = "%-15s | %-15s | %-15s | %-15s | %-15s "
header = format % ("Commit", "Spread spd", "Interp spd", "Spread err", "Interp err")
print ""
print "Make flags: " + makeflags
print ""
for idx,params in enumerate(tests):
print "=== Test: dim=%(dim)d, M=%(M)g, N=%(N)g, tol=%(tol)g" % params
print header
c = commit_A
r = res_A[idx]
print format % (c, r["spread_speed"]+" pts/s", r["interp_speed"]+" pts/s", r["spread_err"], r["interp_err"])
c = commit_B
r = res_B[idx]
print format % (c, r["spread_speed"]+" pts/s", r["interp_speed"]+" pts/s", r["spread_err"], r["interp_err"])
spread_speedup = float(res_B[idx]["spread_speed"]) / float(res_A[idx]["spread_speed"])*100 - 100
spread_sign = "+" if spread_speedup > 0 else ""
spread_speedup = spread_sign + "%.1f" % spread_speedup + "%"
interp_speedup = float(res_B[idx]["interp_speed"]) / float(res_A[idx]["interp_speed"])*100 - 100
interp_sign = "+" if interp_speedup > 0 else ""
interp_speedup = interp_sign + "%.1f" % interp_speedup + "%"
print format % ("", spread_speedup, interp_speedup, "", "")
print ""
| 32.711268
| 112
| 0.585791
|
a1b29d57dea2aa751c8e40b41ba4f19f20fdc9ba
| 2,910
|
py
|
Python
|
tests/test_cases/test_cocotb/test_tests.py
|
lavanyajagan/cocotb
|
2f98612016e68510e264a2b4963303d3588d8404
|
[
"BSD-3-Clause"
] | 350
|
2015-01-09T12:50:13.000Z
|
2019-07-12T09:08:17.000Z
|
tests/test_cases/test_cocotb/test_tests.py
|
lavanyajagan/cocotb
|
2f98612016e68510e264a2b4963303d3588d8404
|
[
"BSD-3-Clause"
] | 710
|
2015-01-05T16:42:29.000Z
|
2019-07-16T13:40:00.000Z
|
tests/test_cases/test_cocotb/test_tests.py
|
lavanyajagan/cocotb
|
2f98612016e68510e264a2b4963303d3588d8404
|
[
"BSD-3-Clause"
] | 182
|
2015-01-08T09:35:20.000Z
|
2019-07-12T18:41:37.000Z
|
# Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
"""
Tests of cocotb.test functionality
* expect_error
* expect_fail
* timeout
"""
from collections.abc import Coroutine
from common import MyException
import cocotb
from cocotb.triggers import Timer
@cocotb.test(expect_error=NameError)
async def test_error(dut):
"""Error in the test"""
await Timer(100, "ns")
fail # noqa
@cocotb.test()
async def test_tests_are_tests(dut):
"""
Test that things annotated with cocotb.test are tests
"""
assert isinstance(test_tests_are_tests, cocotb.test)
# just to be sure...
@cocotb.test(expect_fail=True)
async def test_async_test_can_fail(dut):
assert False
@cocotb.test()
async def test_immediate_test(dut):
"""Test that tests can return immediately"""
return
@cocotb.test(expect_fail=True)
async def test_assertion_is_failure(dut):
assert False
@cocotb.test(expect_error=MyException)
async def test_expect_particular_exception(dut):
raise MyException()
@cocotb.test(expect_error=(MyException, ValueError))
async def test_expect_exception_list(dut):
raise MyException()
@cocotb.test(
expect_error=cocotb.result.SimTimeoutError, timeout_time=1, timeout_unit="ns"
)
async def test_timeout_testdec_fail(dut):
await Timer(10, "ns")
@cocotb.test(timeout_time=100, timeout_unit="ns")
async def test_timeout_testdec_pass(dut):
await Timer(10, "ns")
@cocotb.test(timeout_time=10, timeout_unit="ns")
async def test_timeout_testdec_simultaneous(dut):
try:
await cocotb.triggers.with_timeout(
Timer(1, "ns"), timeout_time=1, timeout_unit="ns"
)
except cocotb.result.SimTimeoutError:
pass
else:
assert False, "Expected a Timeout"
# Whether this test fails or passes depends on the behavior of the
# scheduler, simulator, and the implementation of the timeout function.
# CAUTION: THIS MAY CHANGE
# these tests should run in definition order, not lexicographic order
last_ordered_test = None
@cocotb.test()
async def test_ordering_3(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 3
assert val is None
@cocotb.test()
async def test_ordering_2(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 2
assert val == 3
@cocotb.test()
async def test_ordering_1(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 1
assert val == 2
@cocotb.test()
class TestClass(Coroutine):
def __init__(self, dut):
self._coro = self.run(dut)
async def run(self, dut):
pass
def send(self, value):
self._coro.send(value)
def throw(self, exception):
self._coro.throw(exception)
def __await__(self):
yield from self._coro.__await__()
| 22.55814
| 81
| 0.720619
|
842a4e72249c2995181f5213ca5590444b3b8fe0
| 845
|
py
|
Python
|
setup.py
|
jumbrich/pyanycsv
|
ebffa9ac066721d107557619833c138db5e61109
|
[
"MIT"
] | null | null | null |
setup.py
|
jumbrich/pyanycsv
|
ebffa9ac066721d107557619833c138db5e61109
|
[
"MIT"
] | null | null | null |
setup.py
|
jumbrich/pyanycsv
|
ebffa9ac066721d107557619833c138db5e61109
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for anycsv.
This file was generated with PyScaffold 3.0.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: http://pyscaffold.org/
"""
import sys
from setuptools import setup
# Add here console scripts and other entry points in ini-style format
entry_points = """
[console_scripts]
# script_name = anycsv.module:function
# For example:
# fibonacci = anycsv.skeleton:run
anycsv= anycsv.cli:main
"""
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['pyscaffold>=3.0a0,<3.1a0'] + sphinx,
entry_points=entry_points,
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
| 24.852941
| 75
| 0.695858
|
a99b93611834a5f05fc6c2acbd2d80a30ae7e86e
| 5,104
|
py
|
Python
|
preprocessing.py
|
lgraesser/MCER
|
250aa6965064dbc73462eb5edb559bf9ce949b70
|
[
"Apache-2.0"
] | null | null | null |
preprocessing.py
|
lgraesser/MCER
|
250aa6965064dbc73462eb5edb559bf9ce949b70
|
[
"Apache-2.0"
] | null | null | null |
preprocessing.py
|
lgraesser/MCER
|
250aa6965064dbc73462eb5edb559bf9ce949b70
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import utils
logger = logging.getLogger('preprocessing')
logger.setLevel(logging.INFO)
def preprocess_images(img_name_vector, image_features_extract_model):
'''Extracts and saves image features for each image in img_name_vector.'''
# Get unique images
encode_train = sorted(set(img_name_vector))
# Feel free to change batch_size according to your system configuration
image_dataset = tf.data.Dataset.from_tensor_slices(encode_train)
image_dataset = image_dataset.map(
utils.load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(16)
logger.info('Extracting image features. This may take a while...')
for i, (img, path) in enumerate(image_dataset):
batch_features = image_features_extract_model(img)
batch_features = tf.reshape(batch_features,
(batch_features.shape[0], -1, batch_features.shape[3]))
for bf, p in zip(batch_features, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
if i % 50 == 0:
logger.info(f'Image feature extract, {i} batches done.')
logger.info('Saved all the image features.')
def preprocess_text(train_captions, vocab_size):
'''UNKS, tokenizes, and pads captions in train_captions.'''
logger.info(f'Vocab size: {vocab_size}')
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=vocab_size,
oov_token="<unk>",
filters='!"#$%&()*+.,-/:;=?@[\]^_`{|}~ ')
tokenizer.fit_on_texts(train_captions)
tokenizer.word_index['<pad>'] = 0
tokenizer.index_word[0] = '<pad>'
# Create the tokenized vectors
train_seqs = tokenizer.texts_to_sequences(train_captions)
# Pad each vector to the max_length of the captions
# If you do not provide a max_length value, pad_sequences calculates it automatically
caption_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')
return caption_vector, tokenizer, train_seqs
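# Illustrative call (invented captions):
#   caps = ['<start> a dog runs <end>', '<start> a cat sleeps on a mat <end>']
#   vectors, tok, seqs = preprocess_text(caps, vocab_size=5000)
#   vectors has shape (2, max_caption_len), post-padded with 0 (the '<pad>' id)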
# Find the maximum length of any caption in our dataset
def calc_max_length(tensor):
return max(len(t) for t in tensor)
def create_dataset(image_name_vector,
caption_vector,
train_seqs,
test_size=0.2,
batch_size=64,
buffer_size=1000):
'''Creates the dataset. Returns it shuffled and batched.'''
logger.info(f'Train-test split {1 - test_size}/{test_size}')
logger.info(f'Batch size: {batch_size}')
logger.info(f'Buffer size: {buffer_size}')
# Calculates the max_length, which is used to store the attention weights
max_length = calc_max_length(train_seqs)
logger.info(f'Max sequence length: {max_length}')
# Create training and validation sets using an 80-20 split
img_name_train, img_name_val, cap_train, cap_val = train_test_split(image_name_vector,
caption_vector,
test_size=test_size,
random_state=0)
logger.info(f'Train images: {len(img_name_train)}, train captions: {len(cap_train)}, val images: { len(img_name_val)}, val captions: {len(cap_val)}')
num_steps_train = len(img_name_train) // batch_size
num_steps_val = len(img_name_val) // batch_size
# Load the numpy files
def map_func(img_name, cap):
img_tensor = np.load(img_name.decode('utf-8')+'.npy')
return img_tensor, cap
# Create training dataset
dataset_train = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))
# Use map to load the numpy files in parallel
dataset_train = dataset_train.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
dataset_train = dataset_train.shuffle(buffer_size).batch(batch_size)
dataset_train = dataset_train.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# Create validation dataset
dataset_val = tf.data.Dataset.from_tensor_slices((img_name_val, cap_val))
# Use map to load the numpy files in parallel
dataset_val = dataset_val.map(lambda item1, item2: tf.numpy_function(
map_func, [item1, item2], [tf.float32, tf.int32]),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle and batch
dataset_val = dataset_val.shuffle(buffer_size).batch(batch_size)
dataset_val = dataset_val.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return (dataset_train, dataset_val, num_steps_train, num_steps_val)
| 42.533333
| 153
| 0.664185
|
ba2f42480f5e4ac767c95ec39ced0a6708cf0915
| 43,949
|
py
|
Python
|
InnerEye/ML/deep_learning_config.py
|
albernsurya/InnerEye-DeepLearning
|
62ed6aace84c451a20c4e546f88987454c1bf4bd
|
[
"MIT"
] | 1
|
2021-07-03T14:05:17.000Z
|
2021-07-03T14:05:17.000Z
|
InnerEye/ML/deep_learning_config.py
|
albernsrya/InnerEye-DeepLearning
|
420fb1d452d7834d2c0a79c7bdc711ec16509680
|
[
"MIT"
] | null | null | null |
InnerEye/ML/deep_learning_config.py
|
albernsrya/InnerEye-DeepLearning
|
420fb1d452d7834d2c0a79c7bdc711ec16509680
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
from enum import Enum, unique
from pathlib import Path
from typing import Any, Dict, List, Optional
import param
from pandas import DataFrame
from param import Parameterized
from InnerEye.Azure.azure_util import DEFAULT_CROSS_VALIDATION_SPLIT_INDEX, RUN_CONTEXT, is_offline_run_context
from InnerEye.Common import fixed_paths
from InnerEye.Common.common_util import is_windows
from InnerEye.Common.fixed_paths import DEFAULT_AML_UPLOAD_DIR, DEFAULT_LOGS_DIR_NAME
from InnerEye.Common.generic_parsing import GenericConfig
from InnerEye.Common.type_annotations import PathOrString, TupleFloat2
from InnerEye.ML.common import DATASET_CSV_FILE_NAME, ModelExecutionMode, create_unique_timestamp_id, \
get_best_checkpoint_path, get_recovery_checkpoint_path
# A folder inside of the outputs folder that will contain all information for running the model in inference mode
FINAL_MODEL_FOLDER = "final_model"
FINAL_ENSEMBLE_MODEL_FOLDER = "final_ensemble_model"
# The checkpoints must be stored inside of the final model folder, if we want to avoid copying
# them before registration.
CHECKPOINT_FOLDER = "checkpoints"
VISUALIZATION_FOLDER = "visualizations"
EXTRA_RUN_SUBFOLDER = "extra_run_id"
ARGS_TXT = "args.txt"
WEIGHTS_FILE = "weights.pth"
@unique
class LRWarmUpType(Enum):
"""
Supported LR warm up types for model training
"""
NoWarmUp = "NoWarmUp"
Linear = "Linear"
@unique
class LRSchedulerType(Enum):
"""
Supported lr scheduler types for model training
"""
Exponential = "Exponential"
Step = "Step"
Polynomial = "Polynomial"
Cosine = "Cosine"
MultiStep = "MultiStep"
@unique
class OptimizerType(Enum):
"""
Supported optimizers for model training
"""
Adam = "Adam"
AMSGrad = "AMSGrad"
SGD = "SGD"
RMSprop = "RMSprop"
@unique
class ModelCategory(Enum):
"""
Describes the different high-level model categories that the codebase supports.
"""
Segmentation = "Segmentation" # All models that perform segmentation: Classify each voxel in the input image.
Classification = "Classification" # All models that perform classification
Regression = "Regression" # All models that perform regression
@property
def is_scalar(self) -> bool:
"""
Return True if the current ModelCategory is either Classification or Regression
"""
return self in [ModelCategory.Classification, ModelCategory.Regression]
@unique
class MultiprocessingStartMethod(Enum):
"""
Different methods for starting data loader processes.
"""
fork = "fork"
forkserver = "forkserver"
spawn = "spawn"
class TemperatureScalingConfig(Parameterized):
"""High level config to encapsulate temperature scaling parameters"""
lr: float = param.Number(default=0.002, bounds=(0, None),
doc="The learning rate to use for the optimizer used to learn the "
"temperature scaling parameter")
max_iter: int = param.Number(default=50, bounds=(1, None),
doc="The maximum number of optimization iterations to use in order to "
"learn the temperature scaling parameter")
ece_num_bins: int = param.Number(default=15, bounds=(1, None),
doc="Number of bins to use when computing the "
"Expected Calibration Error")
class DeepLearningFileSystemConfig(Parameterized):
"""High level config to abstract the file system related configs for deep learning models"""
outputs_folder: Path = param.ClassSelector(class_=Path, default=Path(), instantiate=False,
doc="The folder where all training and test outputs should go.")
logs_folder: Path = param.ClassSelector(class_=Path, default=Path(), instantiate=False,
doc="The folder for all log files and Tensorboard event files")
project_root: Path = param.ClassSelector(class_=Path, default=Path(), instantiate=False,
doc="The root folder for the codebase that triggers the training run.")
run_folder: Path = param.ClassSelector(class_=Path, default=Path(), instantiate=False,
doc="The folder that contains outputs and the logs subfolder.")
@staticmethod
def create(project_root: Path,
is_offline_run: bool,
model_name: str,
output_to: Optional[str] = None) -> DeepLearningFileSystemConfig:
"""
Creates a new object that holds output folder configurations. When running inside of AzureML, the output
folders will be directly under the project root. If not running inside AzureML, a folder with a timestamp
will be created for all outputs and logs.
:param project_root: The root folder that contains the code that submitted the present training run.
When running inside the InnerEye repository, it is the git repo root. When consuming InnerEye as a package,
this should be the root of the source code that calls the package.
:param is_offline_run: If true, this is a run outside AzureML. If False, it is inside AzureML.
:param model_name: The name of the model that is trained. This is used to generate a run-specific output
folder.
:param output_to: If provided, the output folders will be created as a subfolder of this argument. If not
given, the output folders will be created inside of the project root.
"""
if not project_root.is_absolute():
raise ValueError(f"The project root is required to be an absolute path, but got {project_root}")
if is_offline_run or output_to:
if output_to:
logging.info(f"All results will be written to the specified output folder {output_to}")
root = Path(output_to).absolute()
else:
logging.info("All results will be written to a subfolder of the project root folder.")
root = project_root.absolute() / DEFAULT_AML_UPLOAD_DIR
timestamp = create_unique_timestamp_id()
run_folder = root / f"{timestamp}_{model_name}"
outputs_folder = run_folder
logs_folder = run_folder / DEFAULT_LOGS_DIR_NAME
else:
logging.info("Running inside AzureML.")
logging.info("All results will be written to a subfolder of the project root folder.")
run_folder = project_root
outputs_folder = project_root / DEFAULT_AML_UPLOAD_DIR
logs_folder = project_root / DEFAULT_LOGS_DIR_NAME
logging.info(f"Run outputs folder: {outputs_folder}")
logging.info(f"Logs folder: {logs_folder}")
return DeepLearningFileSystemConfig(
outputs_folder=outputs_folder,
logs_folder=logs_folder,
project_root=project_root,
run_folder=run_folder
)
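    # Example (offline run, hypothetical values): with project_root=/repo,
    # model_name="Liver" and no output_to, the run folder becomes
    # /repo/<DEFAULT_AML_UPLOAD_DIR>/<timestamp>_Liver and logs go to its
    # <DEFAULT_LOGS_DIR_NAME> subfolder.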
def add_subfolder(self, subfolder: str) -> DeepLearningFileSystemConfig:
"""
Creates a new output folder configuration, where both outputs and logs go into the given subfolder inside
the present outputs folder.
:param subfolder: The subfolder that should be created.
:return:
"""
if self.run_folder:
outputs_folder = self.run_folder / subfolder
logs_folder = self.run_folder / subfolder / DEFAULT_LOGS_DIR_NAME
outputs_folder.mkdir(parents=True, exist_ok=True)
logs_folder.mkdir(parents=True, exist_ok=True)
return DeepLearningFileSystemConfig(
outputs_folder=outputs_folder,
logs_folder=logs_folder,
project_root=self.project_root
)
raise ValueError("This method should only be called for runs outside AzureML, when the logs folder is "
"inside the outputs folder.")
class WorkflowParams(param.Parameterized):
"""
This class contains all parameters that affect how the whole training and testing workflow is executed.
"""
random_seed: int = param.Integer(42, doc="The seed to use for all random number generators.")
number_of_cross_validation_splits: int = param.Integer(0, bounds=(0, None),
doc="Number of cross validation splits for k-fold cross "
"validation")
cross_validation_split_index: int = param.Integer(DEFAULT_CROSS_VALIDATION_SPLIT_INDEX, bounds=(-1, None),
doc="The index of the cross validation fold this model is "
"associated with when performing k-fold cross validation")
perform_training_set_inference: bool = \
param.Boolean(False,
doc="If True, run full image inference on the training set at the end of training. If False and "
"perform_validation_and_test_set_inference is True (default), only run inference on "
"validation and test set. If both flags are False do not run inference.")
perform_validation_and_test_set_inference: bool = \
param.Boolean(True,
doc="If True (default), run full image inference on validation and test set after training.")
weights_url: str = param.String(doc="If provided, a url from which weights will be downloaded and used for model "
"initialization.")
local_weights_path: Optional[Path] = param.ClassSelector(class_=Path,
default=None,
allow_None=True,
doc="The path to the weights to use for model "
"initialization, when training outside AzureML.")
generate_report: bool = param.Boolean(default=True,
doc="If True (default), write a modelling report in HTML format. If False,"
"do not write that report.")
# The default multiprocessing start_method in both PyTorch and the Python standard library is "fork" for Linux and
# "spawn" (the only available method) for Windows. There is some evidence that using "forkserver" on Linux
# can reduce the chance of stuck jobs.
multiprocessing_start_method: MultiprocessingStartMethod = \
param.ClassSelector(class_=MultiprocessingStartMethod,
default=(MultiprocessingStartMethod.spawn if is_windows()
else MultiprocessingStartMethod.fork),
doc="Method to be used to start child processes in pytorch. Should be one of forkserver, "
"fork or spawn. If not specified, fork is used on Linux and spawn on Windows. "
"Set to forkserver as a possible remedy for stuck jobs.")
monitoring_interval_seconds: int = param.Integer(0, doc="Seconds delay between logging GPU/CPU resource "
"statistics. If 0 or less, do not log any resource "
"statistics.")
regression_test_folder: Optional[Path] = \
param.ClassSelector(class_=Path, default=None, allow_None=True,
doc="A path to a folder that contains a set of files. At the end of training and "
"model evaluation, all files given in that folder must be present in the job's output "
"folder, and their contents must match exactly. When running in AzureML, you need to "
"ensure that this folder is part of the snapshot that gets uploaded. The path should "
"be relative to the repository root directory.")
def validate(self) -> None:
if self.weights_url and self.local_weights_path:
raise ValueError("Cannot specify both local_weights_path and weights_url.")
if self.number_of_cross_validation_splits == 1:
raise ValueError("At least two splits required to perform cross validation, but got "
f"{self.number_of_cross_validation_splits}. To train without cross validation, set "
"number_of_cross_validation_splits=0.")
if 0 < self.number_of_cross_validation_splits <= self.cross_validation_split_index:
raise ValueError(f"Cross validation split index is out of bounds: {self.cross_validation_split_index}, "
f"which is invalid for CV with {self.number_of_cross_validation_splits} splits.")
elif self.number_of_cross_validation_splits == 0 and self.cross_validation_split_index != -1:
raise ValueError(f"Cross validation split index must be -1 for a non cross validation run, "
f"found number_of_cross_validation_splits = {self.number_of_cross_validation_splits} "
f"and cross_validation_split_index={self.cross_validation_split_index}")
@property
def is_offline_run(self) -> bool:
"""
Returns True if the run is executing outside AzureML, or False if inside AzureML.
"""
return is_offline_run_context(RUN_CONTEXT)
@property
def perform_cross_validation(self) -> bool:
"""
        True if cross validation will be performed as part of the training procedure.
:return:
"""
return self.number_of_cross_validation_splits > 1
def get_effective_random_seed(self) -> int:
"""
Returns the random seed set as part of this configuration. If the configuration corresponds
to a cross validation split, then the cross validation fold index will be added to the
set random seed in order to return the effective random seed.
:return:
"""
seed = self.random_seed
if self.perform_cross_validation:
# offset the random seed based on the cross validation split index so each
# fold has a different initial random state.
seed += self.cross_validation_split_index
return seed
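    # Example: random_seed=42 with number_of_cross_validation_splits=5 and
    # cross_validation_split_index=3 yields an effective seed of 45.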
class DatasetParams(param.Parameterized):
azure_dataset_id: str = param.String(doc="If provided, the ID of the dataset to use when running in AzureML. "
"This dataset must exist as a folder of the same name in the 'datasets' "
"container in the datasets storage account. This dataset will be mounted "
"and made available at the 'local_dataset' path when running in AzureML.")
local_dataset: Optional[Path] = \
param.ClassSelector(class_=Path, default=None, allow_None=True,
doc="The path of the dataset to use, when training is running outside Azure.")
extra_azure_dataset_ids: List[str] = \
param.List(default=[], allow_None=False,
doc="This can be used to feed in additional datasets to your custom datamodules. These will be"
"mounted and made available as a list of paths in 'extra_local_datasets' when running in AML.")
extra_local_dataset_paths: List[Path] = param.List(class_=Path, default=[], allow_None=False,
doc="This can be used to feed in additional datasets "
"to your custom datamodules when running outside of Azure "
"AML.")
dataset_mountpoint: str = param.String(doc="The path at which the AzureML dataset should be made available via "
"mounting or downloading. This only affects jobs running in AzureML."
"If empty, use a random mount/download point.")
extra_dataset_mountpoints: List[str] = \
param.List(default=[], allow_None=False,
doc="The mounting points for the datasets given in extra_azure_dataset_ids, when running in "
"AzureML. Use an empty string for all datasets where a randomly chosen mount/download point "
"should be used.")
def validate(self) -> None:
if not self.azure_dataset_id and self.local_dataset is None:
raise ValueError("Either of local_dataset or azure_dataset_id must be set.")
if self.all_dataset_mountpoints() and len(self.all_azure_dataset_ids()) != len(self.all_dataset_mountpoints()):
raise ValueError(f"Expected the number of azure datasets to equal the number of mountpoints, "
f"got datasets [{','.join(self.all_azure_dataset_ids())}] "
f"and mountpoints [{','.join(self.all_dataset_mountpoints())}]")
def all_azure_dataset_ids(self) -> List[str]:
"""
Returns a list with all azure dataset IDs that are specified in self.azure_dataset_id and
self.extra_azure_dataset_ids
"""
if not self.azure_dataset_id:
return self.extra_azure_dataset_ids
else:
return [self.azure_dataset_id] + self.extra_azure_dataset_ids
def all_dataset_mountpoints(self) -> List[str]:
"""
Returns a list with all dataset mount points that are specified in self.dataset_mountpoint and
self.extra_dataset_mountpoints
"""
if not self.dataset_mountpoint:
return self.extra_dataset_mountpoints
else:
return [self.dataset_mountpoint] + self.extra_dataset_mountpoints
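    # Example: azure_dataset_id="ds1" and extra_azure_dataset_ids=["ds2"] give
    # all_azure_dataset_ids() == ["ds1", "ds2"]; with no mountpoints set,
    # validate() accepts this because an empty mountpoint list is not checked.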
class OutputParams(param.Parameterized):
output_to: str = param.String(default="",
doc="If provided, the run outputs will be written to the given folder. If not "
"provided, outputs will go into a subfolder of the project root folder.")
file_system_config: DeepLearningFileSystemConfig = param.ClassSelector(default=DeepLearningFileSystemConfig(),
class_=DeepLearningFileSystemConfig,
instantiate=False,
doc="File system related configs")
_model_name: str = param.String("", doc="The human readable name of the model (for example, Liver). This is "
"usually set from the class name.")
@property
def model_name(self) -> str:
"""
Gets the human readable name of the model (e.g., Liver). This is usually set from the class name.
:return: A model name as a string.
"""
return self._model_name
def set_output_to(self, output_to: PathOrString) -> None:
"""
Adjusts the file system settings in the present object such that all outputs are written to the given folder.
:param output_to: The absolute path to a folder that should contain the outputs.
"""
if isinstance(output_to, Path):
output_to = str(output_to)
self.output_to = output_to
self.create_filesystem()
def create_filesystem(self, project_root: Path = fixed_paths.repository_root_directory()) -> None:
"""
Creates new file system settings (outputs folder, logs folder) based on the information stored in the
present object. If any of the folders do not yet exist, they are created.
:param project_root: The root folder for the codebase that triggers the training run.
"""
self.file_system_config = DeepLearningFileSystemConfig.create(
project_root=project_root,
model_name=self.model_name,
is_offline_run=is_offline_run_context(RUN_CONTEXT),
output_to=self.output_to
)
@property
def outputs_folder(self) -> Path:
"""Gets the full path in which the model outputs should be stored."""
return self.file_system_config.outputs_folder
@property
def logs_folder(self) -> Path:
"""Gets the full path in which the model logs should be stored."""
return self.file_system_config.logs_folder
@property
def checkpoint_folder(self) -> Path:
"""Gets the full path in which the model checkpoints should be stored during training."""
return self.outputs_folder / CHECKPOINT_FOLDER
@property
def visualization_folder(self) -> Path:
"""Gets the full path in which the visualizations notebooks should be saved during training."""
return self.outputs_folder / VISUALIZATION_FOLDER
def get_path_to_checkpoint(self) -> Path:
"""
Returns the full path to a recovery checkpoint.
"""
return get_recovery_checkpoint_path(self.checkpoint_folder)
def get_path_to_best_checkpoint(self) -> Path:
"""
Returns the full path to a checkpoint file that was found to be best during training, whatever criterion
was applied there.
"""
return get_best_checkpoint_path(self.checkpoint_folder)
class OptimizerParams(param.Parameterized):
l_rate: float = param.Number(1e-4, doc="The initial learning rate", bounds=(0, None))
_min_l_rate: float = param.Number(0.0, doc="The minimum learning rate for the Polynomial and Cosine schedulers.",
bounds=(0.0, None))
l_rate_scheduler: LRSchedulerType = param.ClassSelector(default=LRSchedulerType.Polynomial,
class_=LRSchedulerType,
instantiate=False,
doc="Learning rate decay method (Cosine, Polynomial, "
"Step, MultiStep or Exponential)")
l_rate_exponential_gamma: float = param.Number(0.9, doc="Controls the rate of decay for the Exponential "
"LR scheduler.")
l_rate_step_gamma: float = param.Number(0.1, doc="Controls the rate of decay for the "
"Step LR scheduler.")
l_rate_step_step_size: int = param.Integer(50, bounds=(0, None),
doc="The step size for Step LR scheduler")
l_rate_multi_step_gamma: float = param.Number(0.1, doc="Controls the rate of decay for the "
"MultiStep LR scheduler.")
l_rate_multi_step_milestones: Optional[List[int]] = param.List(None, bounds=(1, None),
allow_None=True, class_=int,
doc="The milestones for MultiStep decay.")
l_rate_polynomial_gamma: float = param.Number(1e-4, doc="Controls the rate of decay for the "
"Polynomial LR scheduler.")
l_rate_warmup: LRWarmUpType = param.ClassSelector(default=LRWarmUpType.NoWarmUp, class_=LRWarmUpType,
instantiate=False,
doc="The type of learning rate warm up to use. "
"Can be NoWarmUp (default) or Linear.")
l_rate_warmup_epochs: int = param.Integer(0, bounds=(0, None),
doc="Number of warmup epochs (linear warmup) before the "
"scheduler starts decaying the learning rate. "
"For example, if you are using MultiStepLR with "
"milestones [50, 100, 200] and warmup epochs = 100, warmup "
"will last for 100 epochs and the first decay of LR "
"will happen on epoch 150")
optimizer_type: OptimizerType = param.ClassSelector(default=OptimizerType.Adam, class_=OptimizerType,
instantiate=False, doc="The optimizer_type to use")
opt_eps: float = param.Number(1e-4, doc="The epsilon parameter of RMSprop or Adam")
rms_alpha: float = param.Number(0.9, doc="The alpha parameter of RMSprop")
adam_betas: TupleFloat2 = param.NumericTuple((0.9, 0.999), length=2,
doc="The betas parameter of Adam, default is (0.9, 0.999)")
momentum: float = param.Number(0.6, doc="The momentum parameter of the optimizers")
weight_decay: float = param.Number(1e-4, doc="The weight decay used to control L2 regularization")
def validate(self) -> None:
if len(self.adam_betas) < 2:
raise ValueError(
"The adam_betas parameter should be the coefficients used for computing running averages of "
"gradient and its square")
if self.l_rate_scheduler == LRSchedulerType.MultiStep:
if not self.l_rate_multi_step_milestones:
raise ValueError("Must specify l_rate_multi_step_milestones to use LR scheduler MultiStep")
if sorted(set(self.l_rate_multi_step_milestones)) != self.l_rate_multi_step_milestones:
raise ValueError("l_rate_multi_step_milestones must be a strictly increasing list")
if self.l_rate_multi_step_milestones[0] <= 0:
raise ValueError("l_rate_multi_step_milestones cannot be negative or 0.")
@property
def min_l_rate(self) -> float:
return self._min_l_rate
@min_l_rate.setter
def min_l_rate(self, value: float) -> None:
if value > self.l_rate:
raise ValueError("l_rate must be >= min_l_rate, found: {}, {}".format(self.l_rate, value))
self._min_l_rate = value
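# Hedged sketch of the MultiStep validation rules enforced above (values are hypothetical):
# OptimizerParams(l_rate_scheduler=LRSchedulerType.MultiStep,
# l_rate_multi_step_milestones=[50, 100, 200]).validate() passes,
# while milestones of [100, 50] or [0, 50] raise ValueError (not strictly increasing, or first
# milestone not positive), and omitting the milestones entirely also raises.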
class TrainerParams(param.Parameterized):
num_epochs: int = param.Integer(100, bounds=(1, None), doc="Number of epochs to train.")
recovery_checkpoint_save_interval: int = param.Integer(10, bounds=(0, None),
doc="Save epoch checkpoints when epoch number is a multiple "
"of recovery_checkpoint_save_interval. The intended use "
"is to allow restore training from failed runs.")
recovery_checkpoints_save_last_k: int = param.Integer(default=1, bounds=(-1, None),
doc="Number of recovery checkpoints to keep. Recovery "
"checkpoints will be stored as recovery_epoch:{"
"epoch}.ckpt. If set to -1 keep all recovery "
"checkpoints.")
detect_anomaly: bool = param.Boolean(False, doc="If true, test gradients for anomalies (NaN or Inf) during "
"training.")
use_mixed_precision: bool = param.Boolean(False, doc="If true, mixed precision training is activated during "
"training.")
max_num_gpus: int = param.Integer(default=-1, doc="The maximum number of GPUs to use. If set to a value < 0, use "
"all available GPUs. In distributed training, this is the "
"maximum number of GPUs per node.")
pl_progress_bar_refresh_rate: Optional[int] = \
param.Integer(default=None,
doc="PyTorch Lightning trainer flag 'progress_bar_refresh_rate': How often to refresh progress "
"bar (in steps). Value 0 disables progress bar. Value None chooses automatically.")
pl_num_sanity_val_steps: int = \
param.Integer(default=0,
doc="PyTorch Lightning trainer flag 'num_sanity_val_steps': Number of validation "
"steps to run before training, to identify possible problems")
pl_deterministic: bool = \
param.Boolean(default=True,
doc="Controls the PyTorch Lightning trainer flags 'deterministic' and 'benchmark'. If "
"'pl_deterministic' is True, results are perfectly reproducible. If False, they are not, but "
"you may see training speed increases.")
pl_find_unused_parameters: bool = \
param.Boolean(default=False,
doc="Controls the PyTorch Lightning flag 'find_unused_parameters' for the DDP plugin. "
"Setting it to True comes with a performance hit.")
@property
def use_gpu(self) -> bool:
"""
Returns True if a GPU is available, and the self.max_num_gpus flag allows it to be used. Returns False
otherwise (i.e., if there is no GPU available, or self.max_num_gpus==0)
"""
if self.max_num_gpus == 0:
return False
from InnerEye.ML.utils.ml_util import is_gpu_available
return is_gpu_available()
@property
def num_gpus_per_node(self) -> int:
"""
Computes the number of gpus to use for each node: either the number of gpus available on the device
or restrict it to max_num_gpu, whichever is smaller. Returns 0 if running on a CPU device.
"""
import torch
num_gpus = torch.cuda.device_count() if self.use_gpu else 0
logging.info(f"Number of available GPUs: {num_gpus}")
if 0 <= self.max_num_gpus < num_gpus:
num_gpus = self.max_num_gpus
logging.info(f"Restricting the number of GPUs to {num_gpus}")
elif self.max_num_gpus > num_gpus:
logging.warning(f"You requested max_num_gpus {self.max_num_gpus} but there are only {num_gpus} available.")
return num_gpus
class DeepLearningConfig(WorkflowParams,
DatasetParams,
OutputParams,
OptimizerParams,
TrainerParams,
GenericConfig):
"""
A class that holds all settings that are shared across segmentation models and regression/classification models.
"""
_model_category: ModelCategory = param.ClassSelector(class_=ModelCategory,
doc="The high-level model category described by this config.")
num_dataload_workers: int = param.Integer(2, bounds=(0, None),
doc="The number of data loading workers (processes). When set to 0,"
"data loading is running in the same process (no process startup "
"cost, hence good for use in unit testing. However, it "
"does not give the same result as running with 1 worker process)")
shuffle: bool = param.Boolean(True, doc="If true, the dataset will be shuffled randomly during training.")
train_batch_size: int = param.Integer(4, bounds=(0, None),
doc="The number of crops that make up one minibatch during training.")
use_model_parallel: bool = param.Boolean(False, doc="If true, the neural network model is partitioned across all "
"available GPUs to fit a large model. It must not be used "
"together with data parallelism.")
pin_memory: bool = param.Boolean(True, doc="Value of pin_memory argument to DataLoader")
restrict_subjects: Optional[str] = \
param.String(doc="Use at most this number of subjects for train, val, or test set (must be > 0 or None). "
"If None, do not modify the train, val, or test sets. If a string of the form 'i,j,k' where "
"i, j and k are integers, modify just the corresponding sets (i for train, j for val, k for "
"test). If any of i, j or j are missing or are negative, do not modify the corresponding "
"set. Thus a value of 20,,5 means limit training set to 20, keep validation set as is, and "
"limit test set to 5. If any of i,j,k is '+', discarded members of the other sets are added "
"to that set.",
allow_None=True)
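# Example values for restrict_subjects, per the description above:
# "20,,5" caps the training set at 20 subjects and the test set at 5, leaving validation untouched;
# "2,1,+" caps train at 2 and val at 1, and adds the subjects discarded from those sets to the test set.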
_dataset_data_frame: Optional[DataFrame] = \
param.DataFrame(default=None,
doc="The dataframe that contains the dataset for the model. This is usually read from disk "
"from dataset.csv")
avoid_process_spawn_in_data_loaders: bool = \
param.Boolean(is_windows(), doc="If True, use a data loader logic that avoids spawning new processes at the "
"start of each epoch. This speeds up training on both Windows and Linux, but "
"on Linux, inference is currently disabled as the data loaders hang. "
"If False, use the default data loader logic that starts new processes for "
"each epoch.")
max_batch_grad_cam: int = param.Integer(default=0, doc="Max number of validation batches for which "
"to save gradCam images. By default "
"visualizations are saved for all images "
"in the validation set")
label_smoothing_eps: float = param.Number(0.0, bounds=(0.0, 1.0),
doc="Target smoothing value for label smoothing")
log_to_parent_run: bool = param.Boolean(default=False, doc="If true, hyperdrive child runs will log their metrics "
"to their parent run.")
use_imbalanced_sampler_for_training: bool = param.Boolean(default=False,
doc="If True, use an imbalanced sampler during training.")
drop_last_batch_in_training: bool = param.Boolean(default=False,
doc="If True, drop the last incomplete batch during"
"training. If all batches are complete, no batch gets "
"dropped. If False, keep all batches.")
log_summaries_to_files: bool = param.Boolean(
default=True,
doc="If True, model summaries are logged to files in logs/model_summaries; "
"if False, to stdout or driver log")
mean_teacher_alpha: float = param.Number(bounds=(0, 1), allow_None=True, default=None,
doc="If this value is set, the mean teacher model will be computed. "
"Currently only supported for scalar models. In this case, we only "
"report metrics and cross-validation results for "
"the mean teacher model. Likewise the model used for inference "
"is the mean teacher model. The student model is only used for "
"training. Alpha is the momentum term for weight updates of the mean "
"teacher model. After each training step the mean teacher model "
"weights are updated using mean_teacher_"
"weight = alpha * (mean_teacher_weight) "
" + (1-alpha) * (current_student_weights). ")
#: Name of the csv file providing information on the dataset to be used.
dataset_csv: str = param.String(
DATASET_CSV_FILE_NAME,
doc="Name of the CSV file providing information on the dataset to be used. "
"For segmentation models, this file must contain at least the fields: `subject`, `channel`, `filePath`.")
def __init__(self, **params: Any) -> None:
self._model_name = type(self).__name__
# This should be annotated as torch.utils.data.Dataset, but we don't want to import torch here.
self._datasets_for_training: Optional[Dict[ModelExecutionMode, Any]] = None
self._datasets_for_inference: Optional[Dict[ModelExecutionMode, Any]] = None
self.recovery_start_epoch = 0
super().__init__(throw_if_unknown_param=True, **params)
logging.info("Creating the default output folder structure.")
self.create_filesystem(fixed_paths.repository_root_directory())
# Disable the PL progress bar because all InnerEye models have their own console output
self.pl_progress_bar_refresh_rate = 0
self.extra_downloaded_run_id: Optional[Any] = None
def validate(self) -> None:
"""
Validates the parameters stored in the present object.
"""
WorkflowParams.validate(self)
OptimizerParams.validate(self)
DatasetParams.validate(self)
@property
def model_category(self) -> ModelCategory:
"""
Gets the high-level model category that this configuration object represents (segmentation or scalar output).
"""
return self._model_category
@property
def is_segmentation_model(self) -> bool:
"""
Returns True if the present model configuration belongs to the high-level category ModelCategory.Segmentation.
"""
return self.model_category == ModelCategory.Segmentation
@property
def is_scalar_model(self) -> bool:
"""
Returns True if the present model configuration belongs to the high-level category ModelCategory.Scalar
i.e. for Classification or Regression models.
"""
return self.model_category.is_scalar
@property
def compute_grad_cam(self) -> bool:
return self.max_batch_grad_cam > 0
@property
def dataset_data_frame(self) -> Optional[DataFrame]:
"""
Gets the pandas data frame that the model uses.
:return:
"""
return self._dataset_data_frame
@dataset_data_frame.setter
def dataset_data_frame(self, data_frame: Optional[DataFrame]) -> None:
"""
Sets the pandas data frame that the model uses.
:param data_frame: The data frame to set.
"""
self._dataset_data_frame = data_frame
def get_train_epochs(self) -> List[int]:
"""
Returns the epochs for which training will be performed.
:return:
"""
return list(range(self.recovery_start_epoch + 1, self.num_epochs + 1))
def get_total_number_of_training_epochs(self) -> int:
"""
Returns the number of epochs for which a model will be trained.
:return:
"""
return len(self.get_train_epochs())
def get_total_number_of_validation_epochs(self) -> int:
"""
Returns the number of epochs for which a model will be validated.
:return:
"""
return self.get_total_number_of_training_epochs()
@property
def compute_mean_teacher_model(self) -> bool:
"""
Returns True if the mean teacher model should be computed.
"""
return self.mean_teacher_alpha is not None
def __str__(self) -> str:
"""Returns a string describing the present object, as a list of key: value strings."""
arguments_str = "\nArguments:\n"
# Avoid callable params, the bindings that are printed out can be humongous.
# Avoid dataframes
skip_params = {name for name, value in self.param.params().items()
if isinstance(value, (param.Callable, param.DataFrame))}
for key, value in self.param.get_param_values():
if key not in skip_params:
arguments_str += f"\t{key:40}: {value}\n"
return arguments_str
def load_checkpoint_and_modify(self, path_to_checkpoint: Path) -> Dict[str, Any]:
"""
By default, uses torch.load to read and return the state dict from the checkpoint file, and does not
modify the checkpoint file.
Overloading this function:
When weights_url or local_weights_path is set, the file downloaded may not be in the exact
format expected by the model's load_state_dict() - for example, pretrained Imagenet weights for networks
may have mismatched layer names in different implementations.
In such cases, you can overload this function to extract the state dict from the checkpoint.
NOTE: The model checkpoint will be loaded using the torch function load_state_dict() with argument strict=False,
so extra care needs to be taken to check that the state dict is valid.
Check the logs for warnings related to missing and unexpected keys.
See https://pytorch.org/tutorials/beginner/saving_loading_models.html#warmstarting-model-using-parameters
-from-a-different-model
for an explanation on why strict=False is useful when loading parameters from other models.
:param path_to_checkpoint: Path to the checkpoint file.
:return: Dictionary with model and optimizer state dicts. The dict should have at least the following keys:
1. Key ModelAndInfo.MODEL_STATE_DICT_KEY and value set to the model state dict.
2. Key ModelAndInfo.EPOCH_KEY and value set to the checkpoint epoch.
Other (optional) entries corresponding to keys ModelAndInfo.OPTIMIZER_STATE_DICT_KEY and
ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY are also supported.
"""
return load_checkpoint(path_to_checkpoint=path_to_checkpoint, use_gpu=self.use_gpu)
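# Hedged override sketch (not from the original repo): a subclass whose pretrained weights carry
# a hypothetical "model." key prefix could remap the state dict before load_state_dict() like this:
# def load_checkpoint_and_modify(self, path_to_checkpoint: Path) -> Dict[str, Any]:
# checkpoint = load_checkpoint(path_to_checkpoint, use_gpu=self.use_gpu)
# state = checkpoint[ModelAndInfo.MODEL_STATE_DICT_KEY]
# checkpoint[ModelAndInfo.MODEL_STATE_DICT_KEY] = \
# {key.replace("model.", "", 1): value for key, value in state.items()}
# return checkpoint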
def load_checkpoint(path_to_checkpoint: Path, use_gpu: bool = True) -> Dict[str, Any]:
"""
Loads a Torch checkpoint from the given file. If use_gpu==False, map all parameters to the CPU; otherwise
leave the device of all parameters unchanged.
"""
import torch
map_location = None if use_gpu else 'cpu'
checkpoint = torch.load(str(path_to_checkpoint), map_location=map_location)
return checkpoint
| 57.374674
| 120
| 0.602266
|
2aef22d4627d38e20852c7d1e304430948bd3192
| 3,940
|
py
|
Python
|
tests/unit_tests/homeassistant/test_frunk_lock.py
|
ehendrix23/teslajsonpy
|
7e5a86acc053df4e990bfece4db37d2cbb6ac5e0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_tests/homeassistant/test_frunk_lock.py
|
ehendrix23/teslajsonpy
|
7e5a86acc053df4e990bfece4db37d2cbb6ac5e0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_tests/homeassistant/test_frunk_lock.py
|
ehendrix23/teslajsonpy
|
7e5a86acc053df4e990bfece4db37d2cbb6ac5e0
|
[
"Apache-2.0"
] | null | null | null |
"""Test frunk lock."""
import pytest
import time
from teslajsonpy.controller import Controller
from teslajsonpy.homeassistant.trunk import FrunkLock
from tests.tesla_mock import TeslaMock, VIN, CAR_ID
LAST_UPDATE_TIME = time.time()
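# In the mocked vehicle_state used below, "ft" encodes the front-trunk state:
# 0 means closed (reported as locked) and a non-zero value such as 123 means open (unlocked).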
def test_has_battery(monkeypatch):
"""Test has_battery()."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_lock = FrunkLock(_data, _controller)
assert not _lock.has_battery()
def test_is_locked_on_init(monkeypatch):
"""Test is_locked() after initialization."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_lock = FrunkLock(_data, _controller)
assert _lock is not None
assert not _lock.is_locked()
@pytest.mark.asyncio
async def test_is_locked_after_update(monkeypatch):
"""Test is_locked() after an update."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_controller.set_id_vin(CAR_ID, VIN)
_controller.set_last_update_time(vin=VIN, timestamp=LAST_UPDATE_TIME)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["ft"] = 0
_lock = FrunkLock(_data, _controller)
_controller.set_state_params(vin=VIN, params=_data["vehicle_state"])
await _lock.async_update()
assert _lock is not None
assert _lock.is_locked()
@pytest.mark.asyncio
async def test_unlock(monkeypatch):
"""Test unlock()."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_controller.set_id_vin(CAR_ID, VIN)
_controller.set_last_update_time(vin=VIN, timestamp=LAST_UPDATE_TIME)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["ft"] = 0
_lock = FrunkLock(_data, _controller)
_controller.set_state_params(vin=VIN, params=_data["vehicle_state"])
await _lock.async_update()
await _lock.unlock()
assert _lock is not None
assert not _lock.is_locked()
@pytest.mark.asyncio
async def test_unlock_already_unlocked(monkeypatch):
"""Test unlock() when already unlocked."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_controller.set_id_vin(CAR_ID, VIN)
_controller.set_last_update_time(vin=VIN, timestamp=LAST_UPDATE_TIME)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["ft"] = 123
_lock = FrunkLock(_data, _controller)
_controller.set_state_params(vin=VIN, params=_data["vehicle_state"])
await _lock.async_update()
await _lock.unlock()
assert _lock is not None
assert not _lock.is_locked()
# Reset to default for next tests
_data["vehicle_state"]["ft"] = 0
@pytest.mark.asyncio
async def test_lock(monkeypatch):
"""Test lock()."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_controller.set_id_vin(CAR_ID, VIN)
_controller.set_last_update_time(vin=VIN, timestamp=LAST_UPDATE_TIME)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["ft"] = 123
_lock = FrunkLock(_data, _controller)
_controller.set_state_params(vin=VIN, params=_data["vehicle_state"])
await _lock.async_update()
await _lock.lock()
assert _lock is not None
assert _lock.is_locked()
# Reset to default for next tests
_data["vehicle_state"]["ft"] = 0
@pytest.mark.asyncio
async def test_lock_already_locked(monkeypatch):
"""Test lock() when already locked."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_controller.set_id_vin(CAR_ID, VIN)
_controller.set_last_update_time(vin=VIN, timestamp=LAST_UPDATE_TIME)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["ft"] = 0
_lock = FrunkLock(_data, _controller)
_controller.set_state_params(vin=VIN, params=_data["vehicle_state"])
await _lock.async_update()
await _lock.lock()
assert _lock is not None
assert _lock.is_locked()
| 25.921053
| 73
| 0.721574
|
761697770b7ee8d113906fc6bd9849589e1c7dc1
| 2,565
|
py
|
Python
|
docs/tools/purge_cache_for_changed_files.py
|
amosnothing/ClickHouse
|
cf49a839806290c41a3a1ccd5808687d7ccaca78
|
[
"Apache-2.0"
] | 8
|
2019-06-04T02:50:13.000Z
|
2022-02-10T06:46:51.000Z
|
docs/tools/purge_cache_for_changed_files.py
|
amosnothing/ClickHouse
|
cf49a839806290c41a3a1ccd5808687d7ccaca78
|
[
"Apache-2.0"
] | 16
|
2021-06-07T21:32:30.000Z
|
2022-03-31T21:08:29.000Z
|
docs/tools/purge_cache_for_changed_files.py
|
amosnothing/ClickHouse
|
cf49a839806290c41a3a1ccd5808687d7ccaca78
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import requests
import os
import time
FNAME_START = "+++"
CLOUDFLARE_URL = "https://api.cloudflare.com/client/v4/zones/4fc6fb1d46e87851605aa7fa69ca6fe0/purge_cache"
# we have changes in revision and commit sha on all pages
# so such changes have to be ignored
MIN_CHANGED_WORDS = 4
def collect_changed_files():
proc = subprocess.Popen("git diff HEAD~1 --word-diff=porcelain | grep -e '^+[^+]\|^\-[^\-]\|^\+\+\+'", stdout=subprocess.PIPE, shell=True)
changed_files = []
current_file_name = ""
changed_words = []
while True:
line = proc.stdout.readline().decode("utf-8").strip()
if not line:
break
if FNAME_START in line:
if changed_words:
if len(changed_words) > MIN_CHANGED_WORDS:
changed_files.append(current_file_name)
changed_words = []
current_file_name = line[6:]
else:
changed_words.append(line)
# Flush the words collected for the last file in the diff, which the loop above never reaches.
if changed_words and len(changed_words) > MIN_CHANGED_WORDS:
changed_files.append(current_file_name)
return changed_files
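# Illustrative fragment of the `git diff --word-diff=porcelain` stream parsed above
# (the path is hypothetical):
# +++ b/docs/en/index.html <- new file header; line[6:] strips "+++ b/" to get the path
# -old_word <- removed word, counted towards changed_words
# +new_word <- added word, counted towards changed_words
# A file is reported only once it accumulates more than MIN_CHANGED_WORDS such word lines,
# which filters out the per-page revision/commit-sha churn mentioned at the top of the file.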
def filter_and_transform_changed_files(changed_files, base_domain):
result = []
for f in changed_files:
if f.endswith(".html"):
result.append(base_domain + f.replace("index.html", ""))
return result
def convert_to_dicts(changed_files, batch_size):
result = []
current_batch = {"files": []}
for f in changed_files:
if len(current_batch["files"]) >= batch_size:
result.append(current_batch)
current_batch = {"files": []}
current_batch["files"].append(f)
if current_batch["files"]:
result.append(current_batch)
return result
def post_data(prepared_batches, token):
headers = {"Authorization": "Bearer {}".format(token)}
for batch in prepared_batches:
print("Pugring cache for", ", ".join(batch["files"]))
response = requests.post(CLOUDFLARE_URL, json=batch, headers=headers)
response.raise_for_status()
time.sleep(3)
if __name__ == "__main__":
token = os.getenv("CLOUDFLARE_TOKEN")
if not token:
raise Exception("Env variable CLOUDFLARE_TOKEN is empty")
base_domain = os.getenv("BASE_DOMAIN", "https://content.clickhouse.tech/")
changed_files = collect_changed_files()
print("Found", len(changed_files), "changed files")
filtered_files = filter_and_transform_changed_files(changed_files, base_domain)
print("Files rest after filtering", len(filtered_files))
prepared_batches = convert_to_dicts(filtered_files, 25)
post_data(prepared_batches, token)
| 32.468354
| 142
| 0.661598
|
4a9c6ab04b03bb899829b621a6a70d8969931f8c
| 1,256
|
py
|
Python
|
website/instalacion.py
|
RHoK-Bilbao/desahucios
|
1815f7398fa7e7f2182d76bb2d3a0b13869bd8c7
|
[
"BSD-2-Clause"
] | null | null | null |
website/instalacion.py
|
RHoK-Bilbao/desahucios
|
1815f7398fa7e7f2182d76bb2d3a0b13869bd8c7
|
[
"BSD-2-Clause"
] | null | null | null |
website/instalacion.py
|
RHoK-Bilbao/desahucios
|
1815f7398fa7e7f2182d76bb2d3a0b13869bd8c7
|
[
"BSD-2-Clause"
] | null | null | null |
import urllib2
import os
import getpass
import traceback
import sys
os.system("pip install -r requirements.txt")
if os.path.exists("rhok_desahucios.sql"):
rhok_db = open("rhok_desahucios.sql").read()
else:
rhok_db = urllib2.urlopen("http://dev.morelab.deusto.es/rhok_desahucios.sql").read()
open("rhok_desahucios.sql",'w').write(rhok_db)
user = "root"
password = getpass.getpass("Dame password mysql")
import pymysql as dbi
print "Creating database..."
try:
connection = dbi.connect(user=user, passwd=password, host="127.0.0.1")
cursor = connection.cursor()
if sys.platform.startswith('linux'):
cursor.execute("""CREATE DATABASE IF NOT EXISTS `rhok_desahucios` ;
CREATE USER 'rhok'@'localhost' IDENTIFIED BY 'rhok';
GRANT ALL PRIVILEGES ON `rhok_desahucios`.* TO `rhok`@`localhost`;""")
else:
cursor.execute("""CREATE DATABASE IF NOT EXISTS `rhok_desahucios` ;
CREATE USER 'rhok' IDENTIFIED BY 'rhok';
GRANT ALL PRIVILEGES ON `rhok_desahucios`.* TO `rhok`;""")
connection.commit()
cursor.close()
connection.close()
except:
traceback.print_exc()
print "done"
print "Adding content..."
os.system("mysql -urhok -p rhok_desahucios < rhok_desahucios.sql")
| 29.209302
| 88
| 0.696656
|
b50359b5096ab54e8f8825a460173cb40714e4d5
| 255
|
py
|
Python
|
scripts/extract_author.py
|
UO-CIS-322/grading
|
d008f861a332dd20270ef5dcb03fd730dc5cf2cc
|
[
"BSD-2-Clause"
] | 1
|
2017-10-31T07:22:11.000Z
|
2017-10-31T07:22:11.000Z
|
scripts/extract_author.py
|
UO-CIS-322/grading
|
d008f861a332dd20270ef5dcb03fd730dc5cf2cc
|
[
"BSD-2-Clause"
] | 1
|
2019-10-27T06:28:23.000Z
|
2019-10-27T06:28:23.000Z
|
scripts/extract_author.py
|
UO-CIS-322/grading
|
d008f861a332dd20270ef5dcb03fd730dc5cf2cc
|
[
"BSD-2-Clause"
] | 2
|
2016-11-17T01:04:27.000Z
|
2017-10-09T17:04:24.000Z
|
"""
Extract the 'author = ' line from a credentials file.
File must have fixed name credentials.py, in this directory
"""
try:
import credentials
print(credentials.author)
except Exception as err:
print("***Unable to extract author line***")
| 25.5
| 59
| 0.709804
|
7c6990006ee919e5e48f99a4eb6018fe5e944e71
| 782
|
py
|
Python
|
NegNumsMatrix/NegNumsMatrix.py
|
javiaspiroz/ProgramacionConcurrenteDistribuida
|
3c998893b47fc739c7755ce510198b448bed88b5
|
[
"MIT"
] | null | null | null |
NegNumsMatrix/NegNumsMatrix.py
|
javiaspiroz/ProgramacionConcurrenteDistribuida
|
3c998893b47fc739c7755ce510198b448bed88b5
|
[
"MIT"
] | null | null | null |
NegNumsMatrix/NegNumsMatrix.py
|
javiaspiroz/ProgramacionConcurrenteDistribuida
|
3c998893b47fc739c7755ce510198b448bed88b5
|
[
"MIT"
] | null | null | null |
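# neg(a) counts the negative entries of the matrix `a` with the classic "staircase" walk,
# assuming rows and columns are sorted in non-decreasing order (as in the sample matrices below).
# Starting from the top-right corner: a negative value at (fila, colu) means the first colu+1
# entries of that row are negative, so add colu+1 and move down one row; otherwise move one
# column to the left. This needs only O(rows + columns) comparisons.
# (Identifiers are Spanish and kept as-is: fila = row, colu = column, acu = accumulator.)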
def neg(a):
acu=0
fila=0
colu=len(a)-1
while colu>=0 and fila<len(a):
if a[fila][colu]<0:
acu+=(colu+1)
fila+=1
else:
colu-=1
return acu
print(neg(
[[-8,-6,-4,0],
[-3,-3,1,0],
[-1,1,1,1],
[1,2,4,9]]))
#sol 6
print(neg(
[[-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0],[-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2],[-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4],
[-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6],[-4,-3,-2,-1,0,1,2,3,4,5,6,7,8],[-2,-1,0,1,2,3,4,5,6,7,8,9,10],
[0,1,2,3,4,5,6,7,8,9,10,11,12],[2,3,4,5,6,7,8,9,10,11,12,13,14],[4,5,6,7,8,9,10,11,12,13,14,15,16],
[6,7,8,9,10,11,12,13,14,15,16,17,18],[8,9,10,11,12,13,14,15,16,17,18,19,20],[10,11,12,13,14,15,16,17,18,19,20,21,22],
[12,13,14,15,16,17,18,19,20,21,22,23,24]]))
#sol 42
| 30.076923
| 119
| 0.466752
|
b207bf265aa7adbc3ceb590690b0777377bf4ac2
| 1,616
|
py
|
Python
|
software/raspberrypi/eyellowcam/lib/osutil.py
|
andidevel/eyellow
|
6d98917521943145aee30025d2b6a8314a154032
|
[
"MIT"
] | null | null | null |
software/raspberrypi/eyellowcam/lib/osutil.py
|
andidevel/eyellow
|
6d98917521943145aee30025d2b6a8314a154032
|
[
"MIT"
] | null | null | null |
software/raspberrypi/eyellowcam/lib/osutil.py
|
andidevel/eyellow
|
6d98917521943145aee30025d2b6a8314a154032
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2021 Anderson R. Livramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pwd
import subprocess
def create_process(cmd, username=None):
def demote(user_id, group_id):
def result():
os.setgid(group_id)
os.setuid(user_id)
return result
fn = None
if username:
user = pwd.getpwnam(username)
fn = demote(user.pw_uid, user.pw_gid)
return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, preexec_fn=fn, shell=True)
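# Note: the demoting closure calls os.setgid before os.setuid; once the user id is dropped,
# the process no longer has the privilege to change its group id.
# Hypothetical usage sketch:
# proc = create_process("ls /tmp", username="nobody")
# out, err = proc.communicate()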
def shutdown():
create_process('shutdown -h now')
| 37.581395
| 107
| 0.741955
|
ae6c08670abe99491400e1062e7f3c4b184e927d
| 292
|
py
|
Python
|
Docs/Examples/talks/interpol_05.py
|
Vectro-Type-Foundry/robofab
|
cd65d78292d24358c98dce53d283314cdc85878e
|
[
"BSD-3-Clause"
] | 61
|
2015-01-17T10:15:45.000Z
|
2018-12-02T13:53:02.000Z
|
Docs/Examples/talks/interpol_05.py
|
Vectro-Type-Foundry/robofab
|
cd65d78292d24358c98dce53d283314cdc85878e
|
[
"BSD-3-Clause"
] | 37
|
2015-01-05T23:44:56.000Z
|
2018-03-16T19:05:28.000Z
|
Docs/Examples/talks/interpol_05.py
|
Vectro-Type-Foundry/robofab
|
cd65d78292d24358c98dce53d283314cdc85878e
|
[
"BSD-3-Clause"
] | 25
|
2015-01-08T19:49:36.000Z
|
2018-10-29T00:36:46.000Z
|
# robothon06
# prepare glyph for interpolation
# move startpoints
# fix directions
# fix contour order
from robofab.world import CurrentFont
f = CurrentFont()
glyph = f["A"]
glyph.autoContourOrder()
glyph.correctDirection()
for c in glyph.contours:
c.autoStartSegment()
glyph.update()
| 17.176471
| 37
| 0.760274
|
2d7ff60bf4f042b274ac417ac8ceec7c641ae99b
| 930
|
py
|
Python
|
Baisc_Calculator/Calculator.py
|
cy275/Statistics_Calculator
|
c98dec271df98465a87180a170a786bcf817800c
|
[
"MIT"
] | null | null | null |
Baisc_Calculator/Calculator.py
|
cy275/Statistics_Calculator
|
c98dec271df98465a87180a170a786bcf817800c
|
[
"MIT"
] | null | null | null |
Baisc_Calculator/Calculator.py
|
cy275/Statistics_Calculator
|
c98dec271df98465a87180a170a786bcf817800c
|
[
"MIT"
] | null | null | null |
from Baisc_Calculator.Addition import addition
from Baisc_Calculator.Subtraction import subtraction
from Baisc_Calculator.Multiplication import multiplication
from Baisc_Calculator.Division import division
from Baisc_Calculator.Square import square
from Baisc_Calculator.Square_Root import square_root
class Calculator:
result = 0
def __init__(self):
pass
def add(self, a, b):
self.result = addition(a, b)
return self.result
def subtract(self, a, b):
self.result = subtraction(a, b)
return self.result
def multiply(self, a, b):
self.result = multiplication(a, b)
return self.result
def divide(self, a, b):
self.result = division(a, b)
return self.result
def square(self, a):
self.result = square(a)
return self.result
def sqrt(self, a):
self.result = square_root(a)
return self.result
| 24.473684
| 58
| 0.670968
|
f780b32d79288a2d4dd9a83e7905e7c6ab230390
| 315
|
py
|
Python
|
DeleteTriggerFunc/__init__.py
|
jplck/dnsresolver
|
c02ed41bc2089df1f3dc28ca8af44d704d13d8a5
|
[
"MIT"
] | null | null | null |
DeleteTriggerFunc/__init__.py
|
jplck/dnsresolver
|
c02ed41bc2089df1f3dc28ca8af44d704d13d8a5
|
[
"MIT"
] | null | null | null |
DeleteTriggerFunc/__init__.py
|
jplck/dnsresolver
|
c02ed41bc2089df1f3dc28ca8af44d704d13d8a5
|
[
"MIT"
] | null | null | null |
import logging
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
return func.HttpResponse(
"This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response.",
status_code=200
)
| 26.25
| 146
| 0.72381
|
2eac9572643d1107b74ec3027c6ea5264f43b1c0
| 622
|
py
|
Python
|
apps/rent/migrations/0002_auto_20210221_0609.py
|
christianalcantara/book_backend
|
5c98aad01a1ea7d7985cafa14c6de7eb3d0b48af
|
[
"MIT"
] | 1
|
2021-02-23T00:55:14.000Z
|
2021-02-23T00:55:14.000Z
|
apps/rent/migrations/0002_auto_20210221_0609.py
|
christianalcantara/book_backend
|
5c98aad01a1ea7d7985cafa14c6de7eb3d0b48af
|
[
"MIT"
] | 1
|
2021-02-23T00:33:05.000Z
|
2021-02-23T00:33:05.000Z
|
apps/rent/migrations/0002_auto_20210221_0609.py
|
christianalcantara/book_backend
|
5c98aad01a1ea7d7985cafa14c6de7eb3d0b48af
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-02-21 06:09
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("book", "0002_auto_20210219_2129"),
("rent", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="catalog",
name="book",
field=models.OneToOneField(
on_delete=django.db.models.deletion.PROTECT,
related_name="catalog",
to="book.book",
verbose_name="Book",
),
),
]
| 24.88
| 60
| 0.55627
|
dbad735ad6d5e59a683b52ee9bb8dfaa4c4ccc10
| 4,550
|
py
|
Python
|
tests/test_utils.py
|
ebelter/mgi
|
37ea9cbddb3d64d3f0b9db4357f76f80a16d52e5
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
ebelter/mgi
|
37ea9cbddb3d64d3f0b9db4357f76f80a16d52e5
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
ebelter/mgi
|
37ea9cbddb3d64d3f0b9db4357f76f80a16d52e5
|
[
"MIT"
] | null | null | null |
import click, os, pathlib, tempfile, unittest
from click.testing import CliRunner
class UtilsCliTest(unittest.TestCase):
def setUp(self):
self.temp_d = tempfile.TemporaryDirectory()
self.db_fn = os.path.join(self.temp_d.name, "test.db")
self.db_url = "sqlite:///" + self.db_fn
os.environ["SQLALCHEMY_DATABASE_URI"] = self.db_url
def tearDown(self):
self.temp_d.cleanup()
def test_utils_cli(self):
from mgi.cli import cli
from mgi.utils import utils_cli, utils_db_cli, db_create_cmd
runner = CliRunner()
# cli utils
result = runner.invoke(cli, ["utils"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli, ["utils", "-h"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli, ["utils", "--help"])
self.assertEqual(result.exit_code, 0)
# utils_cli
result = runner.invoke(utils_cli, [])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(utils_cli, ["--help"])
self.assertEqual(result.exit_code, 0)
# cli utils db
result = runner.invoke(cli, ["utils", "db"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli, ["utils", "db", "-h"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli, ["utils", "db", "--help"])
self.assertEqual(result.exit_code, 0)
# utils_cli db
result = runner.invoke(utils_db_cli, [])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(utils_db_cli, ["--help"])
self.assertEqual(result.exit_code, 0)
# cli utils db create
result = runner.invoke(cli, ["utils", "db", "create", "-h"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli, ["utils", "db", "create", "--help"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli, ["utils", "db", "create"])
self.assertEqual(result.exit_code, 2)
# utils_db_cli create
result = runner.invoke(utils_db_cli, ["create"])
self.assertEqual(result.exit_code, 2)
result = runner.invoke(utils_db_cli, ["create", "--help"])
self.assertEqual(result.exit_code, 0)
# db_create_cmd
result = runner.invoke(db_create_cmd, [])
self.assertEqual(result.exit_code, 2)
result = runner.invoke(db_create_cmd, ["--help"])
self.assertEqual(result.exit_code, 0)
def test_create_db(self):
from mgi.utils import create_db
create_db(self.db_url)
self.assertTrue(os.path.exists(self.db_fn))
def test_db_create_cmd(self):
from mgi.utils import db_create_cmd as cmd
runner = CliRunner()
# Fails - Existing DB
pathlib.Path(self.db_fn).touch()
with self.assertRaisesRegex(Exception, "exists"):
result = runner.invoke(cmd, [self.db_url], catch_exceptions=False)
self.assertEqual(result.exit_code, 1)
os.remove(self.db_fn)
# Success
result = runner.invoke(cmd, [self.db_url], catch_exceptions=False)
try:
self.assertEqual(result.exit_code, 0)
except:
print(result.output)
raise
expected_output = f"Created DB with {self.db_url}\n"
self.assertEqual(result.output, expected_output)
self.assertTrue(os.path.exists(self.db_fn))
def test_db_set_cmd(self):
from mgi.utils import db_set_cmd as cmd
runner = CliRunner()
result = runner.invoke(cmd, [], catch_exceptions=False)
try:
self.assertEqual(result.exit_code, 0)
except:
print(result.output)
raise
expected_output = f"export SQLALCHEMY_DATABASE_URI=sqlite:///tests/data/db\n"
def test_db_show_cmd(self):
from mgi.utils import db_show_cmd as cmd
runner = CliRunner()
result = runner.invoke(cmd, [], catch_exceptions=False)
try:
self.assertEqual(result.exit_code, 0)
except:
print(result.output)
raise
expected_output = f"{self.db_fn}\n"
os.environ.pop("SQLALCHEMY_DATABASE_URI", None)
result = runner.invoke(cmd, [], catch_exceptions=False)
try:
self.assertEqual(result.exit_code, 0)
except:
print(result.output)
raise
expected_output = f"None\n"
# -- UtilsCliTest
if __name__ == '__main__':
unittest.main(verbosity=2)
| 34.469697
| 85
| 0.609231
|
dd8098259865b20af6c08f97a8722cbee8a9a197
| 3,553
|
py
|
Python
|
examples/simple-example.py
|
evanscottgray/pydegensac
|
0b146e5c002833116adb59e3f676c9d35542fef5
|
[
"MIT"
] | 191
|
2020-04-24T15:17:36.000Z
|
2022-03-30T03:08:47.000Z
|
examples/simple-example.py
|
evanscottgray/pydegensac
|
0b146e5c002833116adb59e3f676c9d35542fef5
|
[
"MIT"
] | 11
|
2020-06-20T12:49:01.000Z
|
2022-03-31T11:20:58.000Z
|
examples/simple-example.py
|
evanscottgray/pydegensac
|
0b146e5c002833116adb59e3f676c9d35542fef5
|
[
"MIT"
] | 29
|
2020-04-25T06:12:06.000Z
|
2022-02-28T03:57:46.000Z
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pydegensac
from time import time
from copy import deepcopy
#Now helper function for running homography RANSAC
def verify_cv2(kps1, kps2, tentatives, th = 4.0 , n_iter = 2000):
src_pts = np.float32([ kps1[m.queryIdx].pt for m in tentatives ]).reshape(-1,2)
dst_pts = np.float32([ kps2[m.trainIdx].pt for m in tentatives ]).reshape(-1,2)
H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, th, 0.99, n_iter)
print ('cv2 found {} inliers'.format(int(deepcopy(mask).astype(np.float32).sum())))
return H, mask
def verify_pydegensac(kps1, kps2, tentatives, th = 4.0, n_iter = 2000):
src_pts = np.float32([ kps1[m.queryIdx].pt for m in tentatives ]).reshape(-1,2)
dst_pts = np.float32([ kps2[m.trainIdx].pt for m in tentatives ]).reshape(-1,2)
H, mask = pydegensac.findHomography(src_pts, dst_pts, th, 0.99, n_iter)
print ('pydegensac found {} inliers'.format(int(deepcopy(mask).astype(np.float32).sum())))
return H, mask
def verify_cv2_fundam(kps1, kps2, tentatives, th = 1.0 , n_iter = 10000):
src_pts = np.float32([ kps1[m.queryIdx].pt for m in tentatives ]).reshape(-1,2)
dst_pts = np.float32([ kps2[m.trainIdx].pt for m in tentatives ]).reshape(-1,2)
F, mask = cv2.findFundamentalMat(src_pts, dst_pts, cv2.RANSAC, th, 0.999, n_iter)
print ('cv2 found {} inliers'.format(int(deepcopy(mask).astype(np.float32).sum())))
return F, mask
def verify_pydegensac_fundam(kps1, kps2, tentatives, th = 1.0, n_iter = 10000):
src_pts = np.float32([ kps1[m.queryIdx].pt for m in tentatives ]).reshape(-1,2)
dst_pts = np.float32([ kps2[m.trainIdx].pt for m in tentatives ]).reshape(-1,2)
F, mask = pydegensac.findFundamentalMatrix(src_pts, dst_pts, th, 0.999, n_iter, enable_degeneracy_check= True)
print ('pydegensac found {} inliers'.format(int(deepcopy(mask).astype(np.float32).sum())))
return F, mask
if __name__ == '__main__':
img1 = cv2.cvtColor(cv2.imread('img/v_dogman/1.ppm'), cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(cv2.imread('img/v_dogman/6.ppm'), cv2.COLOR_BGR2RGB)
# SIFT is not available by pip install, so lets use AKAZE features
det = cv2.AKAZE_create(descriptor_type = 3, threshold=0.00001)
kps1, descs1 = det.detectAndCompute(img1,None)
kps2, descs2 = det.detectAndCompute(img2,None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(descs1,descs2, k=2)
matchesMask = [False for i in range(len(matches))]
# SNN ratio test
for i,(m,n) in enumerate(matches):
if m.distance < 0.9*n.distance:
matchesMask[i]=True
tentatives = [m[0] for i, m in enumerate(matches) if matchesMask[i] ]
th = 4.0
n_iter = 2000
t=time()
print ("Running homography estimation")
cv2_H, cv2_mask = verify_cv2(kps1,kps2,tentatives, th, n_iter )
print ("OpenCV runtime {0:.5f}".format(time()-t), ' sec')
t=time()
cmp_H, cmp_mask = verify_pydegensac(kps1,kps2,tentatives, th, n_iter)
print ("pydegensac runtime {0:.5f}".format(time()-t), ' sec')
print ("H = ", cmp_H)
th = 0.5
n_iter = 50000
print ("Running fundamental matrix estimation")
t=time()
cv2_H, cv2_mask = verify_cv2_fundam(kps1,kps2,tentatives, th, n_iter )
print ("OpenCV runtime {0:.5f}".format(time()-t), ' sec')
t=time()
cmp_H, cmp_mask = verify_pydegensac_fundam(kps1,kps2,tentatives, th, n_iter)
print ("pydegensac {0:.5f}".format(time()-t), ' sec')
print ("F = ", cmp_H)
| 46.142857
| 114
| 0.676048
|
e3bc9640cd1fccd71ee97d439a8bb62f5191bdba
| 6,668
|
py
|
Python
|
nets/vgg.py
|
bubbliiiing/ssd-tf2
|
481fe21bc0c7db6c132235f2dd1daadc97bd9416
|
[
"MIT"
] | 104
|
2020-07-13T02:35:55.000Z
|
2022-03-29T06:58:55.000Z
|
nets/vgg.py
|
wanghaowen1998/ssd-tf2
|
4f74d39ff63ee4a8b9b3ebff58c361c77fef8718
|
[
"MIT"
] | 10
|
2020-07-14T05:51:00.000Z
|
2021-11-08T16:57:26.000Z
|
nets/vgg.py
|
wanghaowen1998/ssd-tf2
|
4f74d39ff63ee4a8b9b3ebff58c361c77fef8718
|
[
"MIT"
] | 35
|
2020-08-07T03:07:07.000Z
|
2022-03-16T01:44:03.000Z
|
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
def VGG16(input_tensor):
#---------------------------- Start of the backbone feature extraction network ----------------------------#
# SSD structure: "net" dictionary collecting every layer output
net = {}
# Block 1
net['input'] = input_tensor
# 300,300,3 -> 150,150,64
net['conv1_1'] = Conv2D(64, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv1_1')(net['input'])
net['conv1_2'] = Conv2D(64, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv1_2')(net['conv1_1'])
net['pool1'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool1')(net['conv1_2'])
# Block 2
# 150,150,64 -> 75,75,128
net['conv2_1'] = Conv2D(128, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv2_1')(net['pool1'])
net['conv2_2'] = Conv2D(128, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv2_2')(net['conv2_1'])
net['pool2'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool2')(net['conv2_2'])
# Block 3
# 75,75,128 -> 38,38,256
net['conv3_1'] = Conv2D(256, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv3_1')(net['pool2'])
net['conv3_2'] = Conv2D(256, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv3_2')(net['conv3_1'])
net['conv3_3'] = Conv2D(256, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv3_3')(net['conv3_2'])
net['pool3'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool3')(net['conv3_3'])
# Block 4
# 38,38,256 -> 19,19,512
net['conv4_1'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv4_1')(net['pool3'])
net['conv4_2'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv4_2')(net['conv4_1'])
net['conv4_3'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv4_3')(net['conv4_2'])
net['pool4'] = MaxPooling2D((2, 2), strides=(2, 2), padding='same',
name='pool4')(net['conv4_3'])
# Block 5
# 19,19,512 -> 19,19,512
net['conv5_1'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv5_1')(net['pool4'])
net['conv5_2'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv5_2')(net['conv5_1'])
net['conv5_3'] = Conv2D(512, kernel_size=(3,3),
activation='relu',
padding='same',
name='conv5_3')(net['conv5_2'])
net['pool5'] = MaxPooling2D((3, 3), strides=(1, 1), padding='same',
name='pool5')(net['conv5_3'])
# FC6
# 19,19,512 -> 19,19,1024
net['fc6'] = Conv2D(1024, kernel_size=(3,3), dilation_rate=(6, 6),
activation='relu', padding='same',
name='fc6')(net['pool5'])
# x = Dropout(0.5, name='drop6')(x)
# FC7
# 19,19,1024 -> 19,19,1024
net['fc7'] = Conv2D(1024, kernel_size=(1,1), activation='relu',
padding='same', name='fc7')(net['fc6'])
# x = Dropout(0.5, name='drop7')(x)
# Block 6
# 19,19,1024 -> 10,10,512
net['conv6_1'] = Conv2D(256, kernel_size=(1,1), activation='relu',
padding='same',
name='conv6_1')(net['fc7'])
net['conv6_2'] = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv6_padding')(net['conv6_1'])
net['conv6_2'] = Conv2D(512, kernel_size=(3,3), strides=(2, 2),
activation='relu',
name='conv6_2')(net['conv6_2'])
# Block 7
# 10,10,512 -> 5,5,256
net['conv7_1'] = Conv2D(128, kernel_size=(1,1), activation='relu',
padding='same',
name='conv7_1')(net['conv6_2'])
net['conv7_2'] = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv7_padding')(net['conv7_1'])
net['conv7_2'] = Conv2D(256, kernel_size=(3,3), strides=(2, 2),
activation='relu', padding='valid',
name='conv7_2')(net['conv7_2'])
# Block 8
# 5,5,256 -> 3,3,256
net['conv8_1'] = Conv2D(128, kernel_size=(1,1), activation='relu',
padding='same',
name='conv8_1')(net['conv7_2'])
net['conv8_2'] = Conv2D(256, kernel_size=(3,3), strides=(1, 1),
activation='relu', padding='valid',
name='conv8_2')(net['conv8_1'])
# Block 9
# 3,3,256 -> 1,1,256
net['conv9_1'] = Conv2D(128, kernel_size=(1,1), activation='relu',
padding='same',
name='conv9_1')(net['conv8_2'])
net['conv9_2'] = Conv2D(256, kernel_size=(3,3), strides=(1, 1),
activation='relu', padding='valid',
name='conv9_2')(net['conv9_1'])
#---------------------------- End of the backbone feature extraction network ----------------------------#
return net
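# Hedged usage sketch (not part of the original file): build the layer dictionary from a Keras
# input tensor and inspect the feature maps SSD typically predicts from, i.e. the 38/19/10/5/3/1
# resolutions noted in the shape comments above:
# from tensorflow.keras.layers import Input
# net = VGG16(Input(shape=(300, 300, 3)))
# for name in ['conv4_3', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2']:
# print(name, net[name].shape)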
| 50.900763
| 99
| 0.393071
|
4228ee40f5745ed4e826c27f405f28bb7111dddc
| 5,887
|
py
|
Python
|
src/oci/ai_vision/models/document_field.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/ai_vision/models/document_field.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/ai_vision/models/document_field.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DocumentField(object):
"""
Form field.
"""
#: A constant which can be used with the field_type property of a DocumentField.
#: This constant has a value of "LINE_ITEM_GROUP"
FIELD_TYPE_LINE_ITEM_GROUP = "LINE_ITEM_GROUP"
#: A constant which can be used with the field_type property of a DocumentField.
#: This constant has a value of "LINE_ITEM"
FIELD_TYPE_LINE_ITEM = "LINE_ITEM"
#: A constant which can be used with the field_type property of a DocumentField.
#: This constant has a value of "LINE_ITEM_FIELD"
FIELD_TYPE_LINE_ITEM_FIELD = "LINE_ITEM_FIELD"
#: A constant which can be used with the field_type property of a DocumentField.
#: This constant has a value of "KEY_VALUE"
FIELD_TYPE_KEY_VALUE = "KEY_VALUE"
def __init__(self, **kwargs):
"""
Initializes a new DocumentField object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param field_type:
The value to assign to the field_type property of this DocumentField.
Allowed values for this property are: "LINE_ITEM_GROUP", "LINE_ITEM", "LINE_ITEM_FIELD", "KEY_VALUE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type field_type: str
:param field_label:
The value to assign to the field_label property of this DocumentField.
:type field_label: oci.ai_vision.models.FieldLabel
:param field_name:
The value to assign to the field_name property of this DocumentField.
:type field_name: oci.ai_vision.models.FieldName
:param field_value:
The value to assign to the field_value property of this DocumentField.
:type field_value: oci.ai_vision.models.FieldValue
"""
self.swagger_types = {
'field_type': 'str',
'field_label': 'FieldLabel',
'field_name': 'FieldName',
'field_value': 'FieldValue'
}
self.attribute_map = {
'field_type': 'fieldType',
'field_label': 'fieldLabel',
'field_name': 'fieldName',
'field_value': 'fieldValue'
}
self._field_type = None
self._field_label = None
self._field_name = None
self._field_value = None
@property
def field_type(self):
"""
**[Required]** Gets the field_type of this DocumentField.
Field type.
Allowed values for this property are: "LINE_ITEM_GROUP", "LINE_ITEM", "LINE_ITEM_FIELD", "KEY_VALUE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The field_type of this DocumentField.
:rtype: str
"""
return self._field_type
@field_type.setter
def field_type(self, field_type):
"""
Sets the field_type of this DocumentField.
Field type.
:param field_type: The field_type of this DocumentField.
:type: str
"""
allowed_values = ["LINE_ITEM_GROUP", "LINE_ITEM", "LINE_ITEM_FIELD", "KEY_VALUE"]
if not value_allowed_none_or_none_sentinel(field_type, allowed_values):
field_type = 'UNKNOWN_ENUM_VALUE'
self._field_type = field_type
@property
def field_label(self):
"""
Gets the field_label of this DocumentField.
:return: The field_label of this DocumentField.
:rtype: oci.ai_vision.models.FieldLabel
"""
return self._field_label
@field_label.setter
def field_label(self, field_label):
"""
Sets the field_label of this DocumentField.
:param field_label: The field_label of this DocumentField.
:type: oci.ai_vision.models.FieldLabel
"""
self._field_label = field_label
@property
def field_name(self):
"""
Gets the field_name of this DocumentField.
:return: The field_name of this DocumentField.
:rtype: oci.ai_vision.models.FieldName
"""
return self._field_name
@field_name.setter
def field_name(self, field_name):
"""
Sets the field_name of this DocumentField.
:param field_name: The field_name of this DocumentField.
:type: oci.ai_vision.models.FieldName
"""
self._field_name = field_name
@property
def field_value(self):
"""
**[Required]** Gets the field_value of this DocumentField.
:return: The field_value of this DocumentField.
:rtype: oci.ai_vision.models.FieldValue
"""
return self._field_value
@field_value.setter
def field_value(self, field_value):
"""
Sets the field_value of this DocumentField.
:param field_value: The field_value of this DocumentField.
:type: oci.ai_vision.models.FieldValue
"""
self._field_value = field_value
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 33.448864
| 245
| 0.656871
|
185d4859f7d82f37c58b9a3bc895b576134a9846
| 11,299
|
py
|
Python
|
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/security_settings.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/security_settings.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/security_settings.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3beta1',
manifest={
'GetSecuritySettingsRequest',
'UpdateSecuritySettingsRequest',
'ListSecuritySettingsRequest',
'ListSecuritySettingsResponse',
'CreateSecuritySettingsRequest',
'DeleteSecuritySettingsRequest',
'SecuritySettings',
},
)
class GetSecuritySettingsRequest(proto.Message):
r"""The request message for
[SecuritySettingsService.GetSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.GetSecuritySettings].
Attributes:
name (str):
Required. Resource name of the settings. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<security settings ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class UpdateSecuritySettingsRequest(proto.Message):
r"""The request message for
[SecuritySettingsService.UpdateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.UpdateSecuritySettings].
Attributes:
security_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings):
Required. [SecuritySettings] object that contains values for
each of the fields to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which fields
get updated. If the mask is not present, all
fields will be updated.
"""
security_settings = proto.Field(
proto.MESSAGE,
number=1,
message='SecuritySettings',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class ListSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.ListSecuritySettings][].
Attributes:
parent (str):
Required. The location to list all security settings for.
Format: ``projects/<Project ID>/locations/<Location ID>``.
page_size (int):
The maximum number of items to return in a
single page. By default 20 and at most 100.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListSecuritySettingsResponse(proto.Message):
r"""The response message for [SecuritySettings.ListSecuritySettings][].
Attributes:
security_settings (Sequence[google.cloud.dialogflowcx_v3beta1.types.SecuritySettings]):
The list of security settings.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
security_settings = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='SecuritySettings',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class CreateSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.CreateSecuritySettings][].
Attributes:
parent (str):
Required. The location to create an
[SecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettings]
for. Format:
``projects/<Project ID>/locations/<Location ID>``.
security_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings):
Required. The security settings to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
security_settings = proto.Field(
proto.MESSAGE,
number=2,
message='SecuritySettings',
)
class DeleteSecuritySettingsRequest(proto.Message):
r"""The request message for [SecuritySettings.DeleteSecuritySettings][].
Attributes:
name (str):
Required. The name of the
[SecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettings]
to delete. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class SecuritySettings(proto.Message):
r"""Represents the settings related to security issues, such as
data redaction and data retention. It may take hours for updates
on the settings to propagate to all the related components and
take effect.
Attributes:
name (str):
Resource name of the settings. Required for the
[SecuritySettingsService.UpdateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.UpdateSecuritySettings]
method.
[SecuritySettingsService.CreateSecuritySettings][google.cloud.dialogflow.cx.v3beta1.SecuritySettingsService.CreateSecuritySettings]
populates the name automatically. Format:
``projects/<Project ID>/locations/<Location ID>/securitySettings/<Security Settings ID>``.
display_name (str):
Required. The human-readable name of the
security settings, unique within the location.
redaction_strategy (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.RedactionStrategy):
Strategy that defines how we do redaction.
redaction_scope (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.RedactionScope):
Defines the data for which Dialogflow applies
redaction. Dialogflow does not redact data that
it does not have access to – for example, Cloud
logging.
inspect_template (str):
`DLP <https://cloud.google.com/dlp/docs>`__ inspect template
name. Use this template to define inspect base settings.
If empty, we use the default DLP inspect config.
The template name will have one of the following formats:
``projects/<Project ID>/locations/<Location ID>/inspectTemplates/<Template ID>``
OR
``organizations/<Organization ID>/locations/<Location ID>/inspectTemplates/<Template ID>``
Note: ``inspect_template`` must be located in the same
region as the ``SecuritySettings``.
deidentify_template (str):
`DLP <https://cloud.google.com/dlp/docs>`__ deidentify
template name. Use this template to define de-identification
configuration for the content.
If empty, Dialogflow replaces sensitive info with
``[redacted]`` text.
The template name will have one of the following formats:
``projects/<Project ID>/locations/<Location ID>/deidentifyTemplates/<Template ID>``
OR
``organizations/<Organization ID>/locations/<Location ID>/deidentifyTemplates/<Template ID>``
Note: ``deidentify_template`` must be located in the same
region as the ``SecuritySettings``.
retention_window_days (int):
Retains data in interaction logging for the
specified number of days. This does not apply to
Cloud logging, which is owned by the user - not
Dialogflow.
            The user must set a value lower than Dialogflow's
            default 30-day TTL; setting a higher value has no
            effect. A missing value or a value of 0 also means
            Dialogflow's default TTL is used.
Note: Interaction logging is a limited access
feature. Talk to your Google representative to
check availability for you.
purge_data_types (Sequence[google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.PurgeDataType]):
            List of types of data to remove when the
            retention settings trigger a purge.
insights_export_settings (google.cloud.dialogflowcx_v3beta1.types.SecuritySettings.InsightsExportSettings):
Controls conversation exporting settings to Insights after
conversation is completed.
If
[retention_strategy][google.cloud.dialogflow.cx.v3beta1.SecuritySettings.retention_strategy]
is set to REMOVE_AFTER_CONVERSATION, Insights export is
disabled no matter what you configure here.
"""
class RedactionStrategy(proto.Enum):
r"""Defines how we redact data."""
REDACTION_STRATEGY_UNSPECIFIED = 0
REDACT_WITH_SERVICE = 1
class RedactionScope(proto.Enum):
r"""Defines what types of data to redact."""
REDACTION_SCOPE_UNSPECIFIED = 0
REDACT_DISK_STORAGE = 2
class PurgeDataType(proto.Enum):
r"""Type of data we purge after retention settings triggers
purge.
"""
PURGE_DATA_TYPE_UNSPECIFIED = 0
DIALOGFLOW_HISTORY = 1
class InsightsExportSettings(proto.Message):
r"""Settings for exporting conversations to
`Insights <https://cloud.google.com/dialogflow/priv/docs/insights>`__.
Attributes:
enable_insights_export (bool):
                If enabled, we automatically export
                conversations to Insights and Insights runs
                its analyzers.
"""
enable_insights_export = proto.Field(
proto.BOOL,
number=1,
)
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
redaction_strategy = proto.Field(
proto.ENUM,
number=3,
enum=RedactionStrategy,
)
redaction_scope = proto.Field(
proto.ENUM,
number=4,
enum=RedactionScope,
)
inspect_template = proto.Field(
proto.STRING,
number=9,
)
deidentify_template = proto.Field(
proto.STRING,
number=17,
)
retention_window_days = proto.Field(
proto.INT32,
number=6,
oneof='data_retention',
)
purge_data_types = proto.RepeatedField(
proto.ENUM,
number=8,
enum=PurgeDataType,
)
insights_export_settings = proto.Field(
proto.MESSAGE,
number=13,
message=InsightsExportSettings,
)
__all__ = tuple(sorted(__protobuf__.manifest))
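# Illustrative sketch (not part of the generated API surface): build an update
# request that changes only the retention window of an existing resource. The
# resource names below are hypothetical placeholders and no RPC is issued.
def _example_update_security_settings_request():
    settings = SecuritySettings(
        name='projects/my-project/locations/us-central1/'
             'securitySettings/my-settings',
        display_name='my-settings',
        retention_window_days=14,
    )
    return UpdateSecuritySettingsRequest(
        security_settings=settings,
        update_mask=field_mask_pb2.FieldMask(paths=['retention_window_days']),
    )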
| 34.448171
| 143
| 0.652182
|
bd1993c8b031caede7d397417c7ec9bc644abefe
| 8,526
|
py
|
Python
|
rrd/model/portal/alarm.py
|
ning1875/falcon-dashboard
|
c04e625c49358a278d5f1663a8055627eeb56334
|
[
"Apache-2.0"
] | 1
|
2020-07-09T00:40:42.000Z
|
2020-07-09T00:40:42.000Z
|
rrd/model/portal/alarm.py
|
ning1875/falcon-dashboard
|
c04e625c49358a278d5f1663a8055627eeb56334
|
[
"Apache-2.0"
] | null | null | null |
rrd/model/portal/alarm.py
|
ning1875/falcon-dashboard
|
c04e625c49358a278d5f1663a8055627eeb56334
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .bean import Bean
from rrd.store import alarm_db
class Event(Bean):
_db = alarm_db
_tbl = 'events'
_cols = 'id, event_caseId, step, cond, status, timestamp'
def __init__(self, id, event_caseId, step, cond, status, timestamp):
self.id = id
self.event_caseId = event_caseId
self.step = step
self.cond = cond
self.status = status
self.timestamp = timestamp
@classmethod
def query(cls, page, limit, event_caseId):
where = 'event_caseId = %s'
params = [event_caseId]
vs = cls.select_vs(where=where, params=params, page=page, limit=limit, order='timestamp desc')
total = cls.total(where, params)
return vs, total
class EventCase(Bean):
_db = alarm_db
_tbl = 'event_cases'
_cols = 'id, endpoint, metric, func, cond, note, max_step, current_step, priority, status, timestamp, update_at, closed_at, closed_note, user_modified, tpl_creator, expression_id, strategy_id, template_id, process_note, process_status'
def __init__(self, id, endpoint, metric, func, cond, note, max_step, current_step, priority,\
status, timestamp, update_at, closed_at, closed_note, user_modified, tpl_creator, \
expression_id, strategy_id, template_id, process_note, process_status,grp_name=None):
self.id = id
self.endpoint = endpoint
self.metric = metric
self.func = func
self.cond = cond
self.note = note
self.max_step = max_step
self.current_step = current_step
self.priority = priority
self.status = status
self.timestamp = timestamp
self.update_at = update_at
self.closed_at = closed_at
self.closed_note = closed_note
self.user_modified = user_modified
self.tpl_creator = tpl_creator
self.expression_id = expression_id
self.strategy_id = strategy_id
self.template_id = template_id
self.process_note = process_note
self.process_status = process_status
self.grp_name = grp_name
@classmethod
def query(cls, page, limit, endpoint_query, metric_query, status):
where = '1=1'
params = []
if status == "PROBLEM" or status == "OK":
where = 'status = %s'
params = [status]
if endpoint_query != "":
where += ' and endpoint like %s'
params.append('%' + endpoint_query + '%')
if metric_query != "":
where += ' and metric like %s'
params.append('%' + metric_query + '%')
vs = cls.select_vs(where=where, params=params, page=page, limit=limit, order='update_at desc')
total = cls.total(where, params)
return vs, total
class EventNote(Bean):
_db = alarm_db
_tbl = 'event_note'
_cols = 'id, event_caseId, note, case_id, status, timestamp, user_id'
def __init__(self, id, event_caseId, note, case_id, status, timestamp, user_id):
self.id = id
self.event_caseId = event_caseId
self.note = note
self.case_id = case_id
self.status = status
self.timestamp = timestamp
self.user_id = user_id
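# Illustrative usage sketch (hypothetical filter values; needs a configured
# alarm_db connection at call time, which is why it is wrapped in a function
# and never executed on import):
def _example_list_problem_cases(page=1, limit=20):
    cases, total = EventCase.query(page, limit, endpoint_query='web',
                                   metric_query='cpu', status='PROBLEM')
    history = [Event.query(page, limit, event_caseId=case.id)[0]
               for case in cases]
    return cases, total, history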
#desc events;
#+--------------+------------------+------+-----+-------------------+-----------------------------+
#| Field | Type | Null | Key | Default | Extra |
#+--------------+------------------+------+-----+-------------------+-----------------------------+
#| id | mediumint(9) | NO | PRI | NULL | auto_increment |
#| event_caseId | varchar(50) | YES | MUL | NULL | |
#| step | int(10) unsigned | YES | | NULL | |
#| cond | varchar(200) | NO | | NULL | |
#| status | int(3) unsigned | YES | | 0 | |
#| timestamp | timestamp | NO | | CURRENT_TIMESTAMP | on update CURRENT_TIMESTAMP |
#+--------------+------------------+------+-----+-------------------+-----------------------------+
#
#desc event_note;
#+--------------+------------------+------+-----+-------------------+-----------------------------+
#| Field | Type | Null | Key | Default | Extra |
#+--------------+------------------+------+-----+-------------------+-----------------------------+
#| id | mediumint(9) | NO | PRI | NULL | auto_increment |
#| event_caseId | varchar(50) | YES | MUL | NULL | |
#| note | varchar(300) | YES | | NULL | |
#| case_id | varchar(20) | YES | | NULL | |
#| status | varchar(15) | YES | | NULL | |
#| timestamp | timestamp | NO | | CURRENT_TIMESTAMP | on update CURRENT_TIMESTAMP |
#| user_id | int(10) unsigned | YES | MUL | NULL | |
#+--------------+------------------+------+-----+-------------------+-----------------------------+
#
#desc event_cases;
#+----------------+------------------+------+-----+-------------------+-----------------------------+
#| Field | Type | Null | Key | Default | Extra |
#+----------------+------------------+------+-----+-------------------+-----------------------------+
#| id | varchar(50) | NO | PRI | NULL | |
#| endpoint | varchar(100) | NO | MUL | NULL | |
#| metric | varchar(200) | NO | | NULL | |
#| func | varchar(50) | YES | | NULL | |
#| cond | varchar(200) | NO | | NULL | |
#| note | varchar(500) | YES | | NULL | |
#| max_step | int(10) unsigned | YES | | NULL | |
#| current_step | int(10) unsigned | YES | | NULL | |
#| priority | int(6) | NO | | NULL | |
#| status | varchar(20) | NO | | NULL | |
#| timestamp | timestamp | NO | | CURRENT_TIMESTAMP | on update CURRENT_TIMESTAMP |
#| update_at | timestamp | YES | | NULL | |
#| closed_at | timestamp | YES | | NULL | |
#| closed_note | varchar(250) | YES | | NULL | |
#| user_modified | int(10) unsigned | YES | | NULL | |
#| tpl_creator | varchar(64) | YES | | NULL | |
#| expression_id | int(10) unsigned | YES | | NULL | |
#| strategy_id | int(10) unsigned | YES | | NULL | |
#| template_id | int(10) unsigned | YES | | NULL | |
#| process_note | mediumint(9) | YES | | NULL | |
#| process_status | varchar(20) | YES | | unresolved | |
#+----------------+------------------+------+-----+-------------------+-----------------------------+
| 52.956522
| 239
| 0.421065
|
af5f32fb2a5fdd84ae0fd4a61a802c4acf42dda1
| 393
|
py
|
Python
|
CtCI/Chapter2/8_Loop_Detection.py
|
wqw547243068/DS_Algorithm
|
6d4a9baeb3650a8f93308c7405c9483bac59e98b
|
[
"RSA-MD"
] | 9
|
2020-07-02T06:06:17.000Z
|
2022-02-26T11:08:09.000Z
|
CtCI/Chapter2/8_Loop_Detection.py
|
wqw547243068/DS_Algorithm
|
6d4a9baeb3650a8f93308c7405c9483bac59e98b
|
[
"RSA-MD"
] | 1
|
2021-11-04T17:26:36.000Z
|
2021-11-04T17:26:36.000Z
|
CtCI/Chapter2/8_Loop_Detection.py
|
wqw547243068/DS_Algorithm
|
6d4a9baeb3650a8f93308c7405c9483bac59e98b
|
[
"RSA-MD"
] | 8
|
2021-01-31T10:31:12.000Z
|
2022-03-13T09:15:55.000Z
|
from LinkedList import LinkedList
def loop_detection(ll):
fast = slow = ll.head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast is slow:
break
if fast is None or fast.next is None:
return None
slow = ll.head
while fast is not slow:
fast = fast.next
slow = slow.next
return fast
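# loop_detection above is Floyd's tortoise-and-hare cycle detection: once the
# fast (2x) and slow (1x) pointers meet inside the loop, restarting one pointer
# from the head and stepping both by one node makes them meet again exactly at
# the node where the cycle begins.
#
# Illustrative usage sketch (the LinkedList constructor and node layout below
# are assumptions; only ll.head and .next are used by loop_detection):
#   ll = LinkedList([1, 2, 3, 4, 5])
#   tail = ll.head
#   while tail.next:
#       tail = tail.next
#   tail.next = ll.head.next          # link the tail back to the second node
#   assert loop_detection(ll) is ll.head.next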
| 17.863636
| 41
| 0.580153
|
ca5e88db15dc5296bdc875b02cd70547410060ea
| 497
|
py
|
Python
|
team/migrations/0009_auto_20161031_2306.py
|
18F/acquisitions.18f.gov
|
7ef7091fd65b4b6797ddeb1c1f56def29522c43b
|
[
"CC0-1.0"
] | 3
|
2016-11-27T05:02:52.000Z
|
2017-01-31T17:36:36.000Z
|
team/migrations/0009_auto_20161031_2306.py
|
18F/acquisitions.18f.gov
|
7ef7091fd65b4b6797ddeb1c1f56def29522c43b
|
[
"CC0-1.0"
] | 61
|
2016-11-05T00:27:34.000Z
|
2017-09-15T23:37:58.000Z
|
team/migrations/0009_auto_20161031_2306.py
|
18F/acquisitions.18f.gov
|
7ef7091fd65b4b6797ddeb1c1f56def29522c43b
|
[
"CC0-1.0"
] | 2
|
2017-07-14T06:21:26.000Z
|
2021-02-14T11:53:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-31 23:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('team', '0008_auto_20161031_2304'),
]
operations = [
migrations.AlterField(
model_name='teammate',
name='photo',
field=models.ImageField(default='/team/photos/default.png', upload_to='/team/photos'),
),
]
| 23.666667
| 98
| 0.625755
|
108d42d7692c4bc0b91e0d263c5a7312b67143db
| 4,282
|
py
|
Python
|
src/sentry/models/processingissue.py
|
JannKleen/sentry
|
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
|
[
"BSD-3-Clause"
] | 1
|
2019-02-27T15:13:06.000Z
|
2019-02-27T15:13:06.000Z
|
src/sentry/models/processingissue.py
|
rmax/sentry
|
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/processingissue.py
|
rmax/sentry
|
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.models.processingissue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from hashlib import sha1
from django.db import models
from django.db.models.aggregates import Count
from django.utils import timezone
from sentry.db.models import (BaseManager, Model, FlexibleForeignKey, GzippedDictField, sane_repr)
def get_processing_issue_checksum(scope, object):
h = sha1()
h.update(scope.encode('utf-8') + '\x00')
h.update(object.encode('utf-8') + '\x00')
return h.hexdigest()
class ProcessingIssueManager(BaseManager):
def with_num_events(self):
return self.annotate(num_events=Count('eventprocessingissue'))
def resolve_processing_issue(self, project, scope, object, type=None):
"""Resolves the given processing issues. If not type is given
all processing issues for scope and object are resolved regardless
of the type.
"""
checksum = get_processing_issue_checksum(scope, object)
q = ProcessingIssue.objects.filter(
project=project,
checksum=checksum,
)
if type is not None:
q = q.filter(type=type)
q.delete()
def resolve_all_processing_issue(self, project):
"""
Resolves all processing issues.
"""
q = ProcessingIssue.objects.filter(
project=project,
)
q.delete()
def find_resolved(self, project_id, limit=100):
"""Returns a list of raw events that generally match the given
processing issue and no longer have any issues remaining. Returns
a list of raw events that are now resolved and a bool that indicates
if there are more.
"""
from sentry.models import RawEvent
        # Fetch one extra row so that has_more below can be determined.
        rv = list(
            RawEvent.objects.filter(project_id=project_id)
            .annotate(eventissue_count=Count('eventprocessingissue'))
            .filter(eventissue_count=0)[:limit + 1]
)
if len(rv) > limit:
rv = rv[:limit]
has_more = True
else:
has_more = False
rv = list(rv)
RawEvent.objects.bind_nodes(rv, 'data')
return rv, has_more
def record_processing_issue(self, raw_event, scope, object, type, data=None):
"""Records a new processing issue for the given raw event."""
data = dict(data or {})
checksum = get_processing_issue_checksum(scope, object)
data['_scope'] = scope
data['_object'] = object
issue, _ = ProcessingIssue.objects.get_or_create(
project_id=raw_event.project_id,
checksum=checksum,
type=type,
defaults=dict(data=data),
)
ProcessingIssue.objects \
.filter(pk=issue.id) \
.update(datetime=timezone.now())
# In case the issue moved away from unresolved we want to make
# sure it's back to unresolved
EventProcessingIssue.objects.get_or_create(
raw_event=raw_event,
processing_issue=issue,
)
class ProcessingIssue(Model):
__core__ = False
project = FlexibleForeignKey('sentry.Project', db_index=True)
checksum = models.CharField(max_length=40, db_index=True)
type = models.CharField(max_length=30)
data = GzippedDictField()
datetime = models.DateTimeField(default=timezone.now)
objects = ProcessingIssueManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_processingissue'
unique_together = (('project', 'checksum', 'type'), )
__repr__ = sane_repr('project_id')
@property
def scope(self):
return self.data['_scope']
@property
def object(self):
return self.data['_object']
class EventProcessingIssue(Model):
__core__ = False
raw_event = FlexibleForeignKey('sentry.RawEvent')
processing_issue = FlexibleForeignKey('sentry.ProcessingIssue')
class Meta:
app_label = 'sentry'
db_table = 'sentry_eventprocessingissue'
unique_together = (('raw_event', 'processing_issue'), )
__repr__ = sane_repr('raw_event', 'processing_issue')
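# Illustrative usage sketch (hypothetical scope/object/type values; requires a
# configured Django environment, an existing RawEvent `raw_event`, and its
# owning Project `project`):
#
#   ProcessingIssue.objects.record_processing_issue(
#       raw_event, scope='native', object='dsym:abc123',
#       type='native_missing_dsym', data={'image_uuid': 'abc123'})
#   # ... later, once the underlying problem has been fixed ...
#   ProcessingIssue.objects.resolve_processing_issue(
#       project, scope='native', object='dsym:abc123')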
| 31.485294
| 98
| 0.641523
|
23df75319f96ca191ca3503110f5869fa23c9221
| 32,586
|
py
|
Python
|
src/soda/core.py
|
dillonhuff/soda-compiler
|
6112001377d11be395cb248f155180bc4fa0e432
|
[
"MIT"
] | 15
|
2019-05-06T05:00:41.000Z
|
2021-01-10T06:11:16.000Z
|
src/soda/core.py
|
dillonhuff/soda-compiler
|
6112001377d11be395cb248f155180bc4fa0e432
|
[
"MIT"
] | 2
|
2020-02-10T00:05:44.000Z
|
2020-05-01T17:43:42.000Z
|
src/soda/core.py
|
UCLA-VAST/soda-compiler
|
9ca1a9372147f37dc654339b391920ac33e91de5
|
[
"MIT"
] | 5
|
2019-07-16T02:28:44.000Z
|
2020-04-27T15:30:17.000Z
|
import collections
import copy
import itertools
import logging
import operator
import cached_property
from haoda import ir
from haoda import util
from haoda.ir import arithmetic
from soda import dataflow
from soda import grammar
from soda import util as soda_util
from soda import visitor
from soda import mutator
_logger = logging.getLogger().getChild(__name__)
class Tensor():
"""A tensor that corresponse to an input, local, or output.
This class is used in the high-level DAG for stencil dependency analysis.
Each tensor either is an input tensor, or has at least 1 parent tensor, which
will be used to generate this tensor. Meanwhile, each tensor either is an
output tensor, or has at least 1 child tensor, which will be computed using
this tensor.
Attributes:
haoda_type: str, type of the tensor element.
parents: Dict from str of name of Tensor to Tensor.
children: Dict from str of name of Tensor to Tensor.
st_ref: Ref, name, index, and latency stored.
offset: int, shift offset in terms of data elements
lets: Lets of computation.
expr: Expr of computation.
ld_refs: Dict from str of name to dict of Ref loaded.
ld_delays: Dict from str of name to extra delay of the input.
Property:
name: str, unique in each SODA program.
st_offset: int, stencil offset in terms of data elements.
st_idx, Tuple of int, the index referenced by its parent stage.
ld_indices: Dict from str of name to dict of accessed indices of the input.
ld_offsets: Dict from str of name to dict of offsets of the input.
"""
def __init__(self, stmt, tile_size):
self.haoda_type = stmt.haoda_type
self._tile_size = tile_size
if isinstance(stmt, grammar.LocalStmtOrOutputStmt):
self.st_ref = copy.copy(stmt.ref)
self.st_ref.parent = self
self.lets = stmt.let
self.expr = stmt.expr
elif isinstance(stmt, grammar.InputStmt):
self._name = stmt.name
self.st_ref = None
self.lets = []
self.expr = None
else:
raise util.InternalError('cannot initialize a Tensor from %s' %
type(stmt))
_logger.debug('tensor initialized from stmt `%s`', stmt)
# pylint: disable=protected-access
_logger.debug(' at tx position %d', stmt._tx_position)
# these fields are to be set externally
self.st_delay = 0
self.parents = collections.OrderedDict()
self.children = collections.OrderedDict()
self.ld_refs = collections.OrderedDict()
self.ld_delays = collections.OrderedDict()
@property
def name(self):
if self.st_ref is not None:
return self.st_ref.name
return self._name
@property
def st_idx(self):
if self.st_ref is not None:
return self.st_ref.idx
return (0,)*len(self._tile_size)
@property
def st_offset(self):
return soda_util.serialize(self.st_idx, self._tile_size) + self.st_delay
@cached_property.cached_property
def ld_indices(self):
return collections.OrderedDict(
(name, collections.OrderedDict((ref.idx, ref) for ref in refs))
for name, refs in self.ld_refs.items())
@cached_property.cached_property
def ld_offsets(self):
return collections.OrderedDict(
(name, collections.OrderedDict(
(soda_util.serialize(ref.idx, self._tile_size), ref) for ref in refs))
for name, refs in self.ld_refs.items())
@property
def c_type(self):
return util.get_c_type(self.haoda_type)
def propagate_type(self):
if self.expr is None:
return
var_types = {}
# pylint: disable=access-member-before-definition
for let in self.lets:
var_types[let.name] = let.haoda_type
def visit_haoda_type(obj, args):
if obj.haoda_type is None:
if isinstance(obj, ir.Var):
obj.haoda_type = var_types[obj.name]
return obj
self.lets = tuple(_.visit(visit_haoda_type) for _ in self.lets)
self.expr = self.expr.visit(visit_haoda_type)
self.st_ref = self.st_ref.visit(visit_haoda_type)
def mutate(self, callback, args=None):
self.lets = tuple(_.visit(callback, args) for _ in self.lets)
self.expr = self.expr.visit(callback, args)
self.st_ref = self.st_ref.visit(callback, args)
def visit_loads(self, callback, args=None):
for let in self.lets:
let.visit(callback, args)
self.expr.visit(callback, args)
def __str__(self):
return '''Tensor
{haoda_type}: {name} = {expr}
store: {st_ref} with delay {st_delay}
parents: {parents}
children: {children}'''.format(
name=self.name, haoda_type=self.haoda_type, expr=self.expr,
parents=util.idx2str(self.parents), children=util.idx2str(self.children),
st_ref=str(self.st_ref), st_delay=self.st_delay)
def is_output(self):
return len(self.children) == 0
def is_input(self):
return len(self.parents) == 0
def is_producer(self):
return not self.is_output()
def is_consumer(self):
return not self.is_input()
class Stencil():
"""
Attributes:
iterate: int, number of iteration to implement.
burst_width: int, width of bits for DRAM burst access.
app_name: str, application's name.
tile_size: List of int.
unroll_factor: int.
dim: int.
param_stmts: List of ParamStmt.
input_stmts: List of InputStmt.
local_stmts: List of LocalStmt.
output_stmts: List of OutputStmt.
Cached properties:
tensors: Dict from str of name to Tensor.
input_names: Tuple of str, names of input tensors.
param_names: Tuple of str, names of param tensors.
local_names: Tuple of str, names of local tensors.
output_names: Tuple of str, names of output tensors.
"""
def __init__(self, **kwargs):
self.iterate = kwargs.pop('iterate')
if self.iterate < 1:
raise util.SemanticError('cannot iterate %d times' % self.iterate)
# platform determined
self.burst_width = kwargs.pop('burst_width')
# application determined
self.app_name = kwargs.pop('app_name')
# parameters that can be explored
self.tile_size = tuple(kwargs.pop('tile_size'))
self.unroll_factor = kwargs.pop('unroll_factor')
# stage-independent
self.dim = kwargs.pop('dim')
self.param_stmts = kwargs.pop('param_stmts')
# stage-specific
self.input_stmts = kwargs.pop('input_stmts')
self.local_stmts = kwargs.pop('local_stmts')
self.output_stmts = kwargs.pop('output_stmts')
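    # Accepted bank-mapping formats (names below are hypothetical): a
    # per-variable mapping such as dram_in='input0:0.1^input1:2' binds input0
    # to DRAM banks (0, 1) and input1 to bank 2, while a bare dram_in='0.1'
    # applies banks (0, 1) to every input statement.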
if 'dram_in' in kwargs:
dram_in = kwargs.pop('dram_in')
if dram_in is not None:
if ':' in dram_in:
input_stmt_map = {_.name : _ for _ in self.input_stmts}
for dram_map in dram_in.split('^'):
var_name, bank_list = dram_map.split(':')
if var_name not in input_stmt_map:
raise util.SemanticError('no input named `{}`'.format(var_name))
input_stmt_map[var_name].dram = tuple(map(int,
bank_list.split('.')))
else:
for input_stmt in self.input_stmts:
input_stmt.dram = tuple(map(int, dram_in.split('.')))
if 'dram_out' in kwargs:
dram_out = kwargs.pop('dram_out')
if dram_out is not None:
if ':' in dram_out:
output_stmt_map = {_.name : _ for _ in self.output_stmts}
for dram_map in dram_out.split(','):
var_name, bank_list = dram_map.split(':')
if var_name not in output_stmt_map:
raise util.SemanticError('no output named `{}`'.format(var_name))
output_stmt_map[var_name].dram = tuple(map(int,
bank_list.split('.')))
else:
for output_stmt in self.output_stmts:
output_stmt.dram = tuple(map(int, dram_out.split('.')))
if self.iterate > 1:
if len(self.input_stmts) != len(self.output_stmts):
raise util.SemanticError(
'number of input tensors must be the same as output if iterate > 1 '
'times, currently there are %d input(s) but %d output(s)' %
(len(self.input_stmts), len(self.output_stmts)))
if self.input_types != self.output_types:
raise util.SemanticError(
'input must have the same type(s) as output if iterate > 1 '
'times, current input has type %s but output has type %s' %
(util.lst2str(self.input_types), util.lst2str(self.output_types)))
_logger.debug('pipeline %d iterations of [%s] -> [%s]' % (self.iterate,
', '.join('%s: %s' % (stmt.haoda_type, stmt.name)
for stmt in self.input_stmts),
', '.join('%s: %s' % (stmt.haoda_type, stmt.name)
for stmt in self.output_stmts)))
for stmt in itertools.chain(self.local_stmts, self.output_stmts):
_logger.debug('simplify %s', stmt.name)
stmt.expr = arithmetic.simplify(stmt.expr)
stmt.let = arithmetic.simplify(stmt.let)
# soda frontend successfully parsed
# triggers cached property
# replicate tensors for iterative stencil
# pylint: disable=pointless-statement
self.tensors
_logger.debug('producer tensors: [%s]',
', '.join(tensor.name for tensor in self.producer_tensors))
_logger.debug('consumer tensors: [%s]',
', '.join(tensor.name for tensor in self.consumer_tensors))
# TODO: build Ref table and Var table
# generate reuse buffers and get haoda nodes
# pylint: disable=pointless-statement
self.dataflow_super_source
_logger.debug('dataflow: %s', self.dataflow_super_source)
_logger.debug('module table: %s', dict(self.module_table))
_logger.debug('module traits: %s', self.module_traits)
@cached_property.cached_property
def dataflow_super_source(self):
return dataflow.create_dataflow_graph(self)
@property
def module_table(self):
return self.dataflow_super_source.module_table
@property
def module_traits(self):
return self.dataflow_super_source.module_traits
@cached_property.cached_property
def input_types(self):
return tuple(tensor.haoda_type for tensor in self.input_stmts)
@cached_property.cached_property
def param_types(self):
return tuple(tensor.haoda_type for tensor in self.param_stmts)
@cached_property.cached_property
def local_types(self):
return tuple(tensor.haoda_type for tensor in self.local_stmts)
@cached_property.cached_property
def output_types(self):
return tuple(tensor.haoda_type for tensor in self.output_stmts)
@cached_property.cached_property
def input_names(self):
return tuple(stmt.name for stmt in self.input_stmts)
@cached_property.cached_property
def param_names(self):
return tuple(stmt.name for stmt in self.param_stmts)
@cached_property.cached_property
def local_names(self):
return tuple(stmt.name for stmt in self.local_stmts)
@cached_property.cached_property
def output_names(self):
return tuple(stmt.name for stmt in self.output_stmts)
@cached_property.cached_property
def symbol_table(self):
"""Constructs a mapping from a tensor's name to its type.
Returns:
tensor_types: dict from name (str) to haoda_type (str).
"""
tensor_types = {}
for name, haoda_type in zip(self.input_names, self.input_types):
tensor_types[name] = haoda_type
for name, haoda_type in zip(self.local_names, self.local_types):
tensor_types[name] = haoda_type
for name, haoda_type in zip(self.output_names, self.output_types):
tensor_types[name] = haoda_type
return tensor_types
@cached_property.cached_property
def tensors(self):
"""Constructs high-level DAG and creates the tensors.
Returns:
An collections.OrderedDict mapping a tensor's name to the tensor.
"""
# TODO: check for name conflicts
tensor_map = collections.OrderedDict()
for stmt in self.input_stmts:
tensor = Tensor(stmt, self.tile_size)
tensor_map[stmt.name] = tensor
def name_in_iter(name, iteration):
if name in self.input_names:
if iteration > 0:
return name+'_iter%d' % iteration
return name
if name in self.output_names:
if iteration < self.iterate-1:
return (self.input_names[self.output_names.index(name)]+
'_iter%d' % (iteration+1))
return name
if name in self.local_names:
if iteration > 0:
return name+'_iter%d' % iteration
return name
if name in self.param_names:
return name
raise util.InternalError('unknown name: %s' % name)
for iteration in range(self.iterate):
_logger.debug('iterate %s', iteration)
_logger.debug('map: %s', self.symbol_table)
def mutate_name_callback(obj, mutated):
if isinstance(obj, ir.Ref):
obj.haoda_type = self.symbol_table[obj.name]
# pylint: disable=cell-var-from-loop
obj.name = name_in_iter(obj.name, iteration)
return obj
tensors = []
for stmt in itertools.chain(self.local_stmts, self.output_stmts):
tensor = Tensor(stmt.visit(mutate_name_callback), self.tile_size)
loads = visitor.get_load_tuple(tensor)
norm_idx = tuple(min(load.idx[d] for load in loads
if load.name not in self.param_names)
for d in range(self.dim))
if any(norm_idx):
_logger.debug('normalize index of %s: (%s)',
tensor.name, ', '.join(map(str, norm_idx)))
mutator.shift(tensor, norm_idx, excluded=self.param_names)
tensor_map[tensor.name] = tensor
tensors.append(tensor)
for tensor in tensors:
_logger.debug('%s', tensor)
for tensor in tensors:
tensor.propagate_type()
loads = visitor.get_load_dict(tensor)
for parent_name, ld_refs in loads.items():
ld_refs = sorted(ld_refs, key=lambda ref: soda_util.serialize(
ref.idx, self.tile_size))
parent_tensor = tensor_map[parent_name]
parent_tensor.children[tensor.name] = tensor
tensor.parents[parent_name] = parent_tensor
tensor.ld_refs[parent_name] = ld_refs
# high-level DAG construction finished
for tensor in tensor_map.values():
if tensor.name in self.input_names:
_logger.debug('<input tensor>: %s', tensor)
elif tensor.name in self.output_names:
_logger.debug('<output tensor>: %s', tensor)
else:
_logger.debug('<local tensor>: %s', tensor)
return tensor_map
@cached_property.cached_property
def chronological_tensors(self):
"""Computes the offsets of tensors.
Returns:
A list of Tensor, in chronological order.
"""
_logger.info('calculate tensor offsets')
processing_queue = collections.deque(list(self.input_names))
processed_tensors = set(self.input_names)
chronological_tensors = list(map(self.tensors.get, self.input_names))
for tensor in chronological_tensors:
_logger.debug('tensor <%s> is at offset %d' %
(tensor.name, tensor.st_offset))
_logger.debug('processing queue: %s', processing_queue)
_logger.debug('processed_tensors: %s', processed_tensors)
while processing_queue:
tensor = self.tensors[processing_queue.popleft()]
_logger.debug('inspecting tensor %s\'s children' % tensor.name)
for child in tensor.children.values():
if ({x.name for x in child.parents.values()} <= processed_tensors
and child.name not in processed_tensors):
# good, all inputs are processed
# can determine offset of current tensor
_logger.debug(
'input%s for tensor <%s> (i.e. %s) %s processed',
'' if len(child.parents) == 1 else 's',
child.name,
', '.join([x.name for x in child.parents.values()]),
'is' if len(child.parents) == 1 else 'are')
stage_offset = soda_util.serialize(child.st_idx, self.tile_size)
# synchronization check
def sync(tensor, offset):
if tensor is None:
return offset
_logger.debug('index of tensor <%s>: %s',
tensor.name, tensor.st_idx)
stage_offset = soda_util.serialize(tensor.st_idx, self.tile_size)
_logger.debug('offset of tensor <%s>: %d',
tensor.name, stage_offset)
loads = visitor.get_load_dict(tensor)
for name in loads:
loads[name] = tuple(ref.idx for ref in loads[name])
_logger.debug('loads: %s', ', '.join(
'%s@%s' % (name, util.lst2str(map(util.idx2str, indices)))
for name, indices in loads.items()))
for n in loads:
loads[n] = soda_util.serialize_iter(loads[n], self.tile_size)
for l in loads.values():
l[0], l[-1] = (stage_offset - max(l), stage_offset - min(l))
del l[1:-1]
if len(l) == 1:
l.append(l[-1])
_logger.debug(
'load offset range in tensor %s: %s', tensor.name, '{%s}' % (
', '.join('%s: [%d:%d]' % (n, *v)
for n, v in loads.items())))
for parent in tensor.parents.values():
tensor_distance = next(reversed(tensor.ld_offsets[parent.name]))
_logger.debug('tensor distance: %s', tensor_distance)
_logger.debug(
'want to access tensor <%s> at offset [%d, %d] '
'to generate tensor <%s> at offset %d',
parent.name, offset+loads[parent.name][0],
offset+loads[parent.name][-1], tensor.name, offset)
tensor_offset = (parent.st_delay+tensor_distance-stage_offset)
if offset < tensor_offset:
_logger.debug(
'but tensor <%s> won\'t be available until offset %d',
parent.name, tensor_offset)
offset = tensor_offset
_logger.debug('need to access tensor <%s> at offset [%d, %d] '
'to generate tensor <%s> at offset %d',
parent.name, offset+loads[parent.name][0],
offset+loads[parent.name][-1], tensor.name,
offset)
return offset
_logger.debug('intend to generate tensor <%s> at offset %d',
child.name, child.st_delay)
synced_offset = sync(child, child.st_delay)
_logger.debug('synced offset: %s', synced_offset)
child.st_delay = synced_offset
_logger.debug('decide to generate tensor <%s> at offset %d',
child.name, child.st_delay)
# add delay
for sibling in child.parents.values():
delay = child.st_delay - (sibling.st_delay +
list(child.ld_offsets[sibling.name].keys())[-1] - stage_offset)
if delay > 0:
_logger.debug(
'tensor %s arrives at tensor <%s> at offset %d < %d; '
'add %d delay', sibling.name, child.name,
sibling.st_delay+next(reversed(
child.ld_offsets[sibling.name]))-stage_offset,
child.st_delay, delay)
else:
_logger.debug(
'tensor %s arrives at tensor <%s> at offset %d = %d; good',
sibling.name, child.name, sibling.st_delay+next(reversed(
child.ld_offsets[sibling.name]))-stage_offset,
child.st_delay)
child.ld_delays[sibling.name] = max(delay, 0)
_logger.debug('set delay of |%s <- %s| to %d' %
(child.name, sibling.name, child.ld_delays[sibling.name]))
processing_queue.append(child.name)
processed_tensors.add(child.name)
chronological_tensors.append(child)
else:
for parent in tensor.parents.values():
if parent.name not in processed_tensors:
_logger.debug('tensor %s requires tensor <%s> as an input',
tensor.name, parent.name)
_logger.debug('but tensor <%s> isn\'t processed yet',
parent.name)
_logger.debug('add %s to scheduling queue',
parent.name)
processing_queue.append(parent.name)
_logger.debug('tensors in insertion order: [%s]',
', '.join(map(str, self.tensors)))
_logger.debug('tensors in chronological order: [%s]',
', '.join(t.name for t in chronological_tensors))
for tensor in self.tensors.values():
for name, indices in tensor.ld_indices.items():
_logger.debug('stage index: %s@%s <- %s@%s',
tensor.name, util.idx2str(tensor.st_idx),
name, util.lst2str(util.idx2str(idx) for idx in indices))
for tensor in self.tensors.values():
if tensor.is_input():
continue
_logger.debug('stage expr: %s = %s', tensor.st_ref, tensor.expr)
for tensor in self.tensors.values():
for name, offsets in tensor.ld_offsets.items():
_logger.debug('stage offset: %s@%d <- %s@%s',
tensor.name, soda_util.serialize(tensor.st_idx,
self.tile_size),
name, util.lst2str(offsets))
for tensor in self.tensors.values():
for name, delay in tensor.ld_delays.items():
_logger.debug('stage delay: %s <- %s delayed %d' %
(tensor.name, name, delay))
return chronological_tensors
@cached_property.cached_property
def input_partition(self):
pixel_width_i = sum(self.pixel_width_i)
if self.burst_width/pixel_width_i*self.dram_bank/2 > self.unroll_factor/2:
return int(self.burst_width/pixel_width_i*self.dram_bank/2)
return int(self.unroll_factor/2)
@cached_property.cached_property
def output_partition(self):
pixel_width_o = sum(self.pixel_width_o)
if self.burst_width/pixel_width_o*self.dram_bank/2 > self.unroll_factor/2:
return int(self.burst_width/pixel_width_o*self.dram_bank/2)
return int(self.unroll_factor/2)
@cached_property.cached_property
def pixel_width_i(self):
return list(map(util.get_width_in_bits, self.input_stmts))
@cached_property.cached_property
def pixel_width_o(self):
return list(map(util.get_width_in_bits, self.output_stmts))
@cached_property.cached_property
def producer_tensors(self):
return tuple(filter(Tensor.is_producer, self.tensors.values()))
@cached_property.cached_property
def consumer_tensors(self):
return tuple(filter(Tensor.is_consumer, self.tensors.values()))
# return [Tensor, ...]
def _get_parent_tensors_for(self, node):
return {x: self.tensors[x]
for x in {x.name for x in node.get_loads()
if x.name not in self.extra_params}}
# return {name: [(idx, ...), ...]}
def _get_window_for(self, node):
loads = node.get_loads() # [Load, ...]
load_names = {l.name for l in loads
if l.name not in self.extra_params}
windows = {name: sorted({l.idx for l in loads if l.name == name},
key=lambda x: soda_util.serialize(x, self.tile_size))
for name in load_names}
_logger.debug('window for %s@(%s) is %s' %
(node.name, ', '.join(map(str, node.expr[0].idx)), windows))
return windows
# return [StageExpr, ...]
def _get_expr_for(self, node):
if isinstance(node, grammar.Output):
return node.expr
if isinstance(node, grammar.Local):
return node.expr
raise util.SemanticError('cannot get expression for %s' % str(type(node)))
@cached_property.cached_property
def reuse_buffers(self):
"""Constructs the reuse buffers.
Returns:
A dict mapping a tensor's name to its reuse buffers.
"""
unroll_factor = self.unroll_factor
self._reuse_buffer_lengths = {}
reuse_buffers = {}
for tensor in self.producer_tensors:
reuse_buffer = _get_reuse_buffer(self.tile_size, tensor, unroll_factor)
reuse_buffer_length = {}
reuse_buffers[tensor.name] = reuse_buffer
self._reuse_buffer_lengths[tensor.name] = reuse_buffer_length
first = [True]*unroll_factor
for start, end in reuse_buffer[1:]:
if first[start%unroll_factor]:
first[start%unroll_factor] = False
if start >= unroll_factor:
reuse_buffer_length[end] = end//unroll_factor
continue
reuse_buffer_length[end] = (end-start)//unroll_factor
return reuse_buffers
@cached_property.cached_property
def all_points(self):
all_points = {}
for tensor in self.producer_tensors:
all_points[tensor.name] = _get_points(self.tile_size, tensor,
self.unroll_factor)
return all_points
@cached_property.cached_property
def next_fifo(self):
"""Constructs the next fifo offset mapping.
Returns:
A dict mapping a tensor's name and offset to the next offset.
"""
next_fifo = {}
for name, reuse_buffer in self.reuse_buffers.items():
next_fifo[name] = {}
for start, end in reuse_buffer[1:]:
if start < end:
next_fifo[name][start] = end
_logger.debug('next_fifo: %s' % next_fifo)
return next_fifo
@cached_property.cached_property
def reuse_buffer_lengths(self):
"""Constructs the reuse buffer lengths.
Returns:
A dict mapping a tensor's name to its reuse buffers' lengths.
"""
# pylint: disable=pointless-statement
self.reuse_buffers
return self._reuse_buffer_lengths
def _get_reuse_chains(tile_size, tensor, unroll_factor):
"""Generates reuse chains for a Tensor.
Generates reuse chains for a Tensor under the given tile size and unroll
factor.
Args:
tile_size: An iterable representing the tile size in each dimension.
tensor: A Tensor to which the reuse chains belongs.
unroll_factor: An int representing the unroll factor.
Returns:
A list of tuples where each tuple represents a reuse chain and each
    element of the tuple represents the offset from the latest input.
"""
_logger.debug('get reuse chains of tensor %s', tensor.name)
def unroll_offsets(offsets, child):
unrolled_offsets = set()
for unroll_idx in range(unroll_factor):
for offset in offsets:
unrolled_offsets.add(max(offsets) + unroll_idx - offset +
child.ld_delays[tensor.name])
return unrolled_offsets
A_dag = set()
for child in tensor.children.values():
A_dag |= unroll_offsets(
soda_util.serialize_iter(child.ld_indices[tensor.name], tile_size), child)
_logger.debug('A† of tensor %s: %s', tensor.name, A_dag)
chains = []
for chain_idx in reversed(range(unroll_factor)):
chains.append(tuple(sorted(
offset for offset in A_dag if offset % unroll_factor == chain_idx)))
_logger.debug('reuse chains: %s', chains)
for idx, chain in enumerate(chains):
_logger.debug('reuse chain %d of tensor %s: %s', idx, tensor.name, chain)
return chains
def _get_points(tile_size, tensor, unroll_factor):
"""Generates offset-to-point mapping for a Tensor.
Generates a mapping which can be used to determine the accessed point index
from the offset for a Tensor, under the given tile size and unroll factor.
Args:
tile_size: An iterable representing the tile size in each dimension.
tensor: A Tensor to which the mapping belongs.
unroll_factor: An int representing the unroll factor.
Returns:
A dict of name str to a dict of offset to a dict of unroll index to
point index.
"""
all_points = {} # {name: {offset: {unroll_idx: point_idx}}}
for child in tensor.children.values():
all_points[child.name] = {}
offsets = child.ld_offsets[tensor.name]
for unroll_idx in range(unroll_factor):
for idx, offset in enumerate(offsets):
all_points[child.name].setdefault(
max(offsets) - offset + child.ld_delays[tensor.name] + unroll_idx,
{})[unroll_factor-1-unroll_idx] = idx
for child in tensor.children.values():
for offset, points in all_points[child.name].items():
for unroll_idx, point in points.items():
_logger.debug(
'%s <- %s @ offset=%d <=> %s @ unroll_idx=%d',
child.name, tensor.name, offset,
util.idx2str(list(child.ld_indices[tensor.name].values())[point].idx),
unroll_idx)
return all_points
def _get_reuse_buffer(tile_size, tensor, unroll_factor):
"""Generates reuse buffer for a Tensor.
Generates a list representing the reuse buffer for a Tensor, under the given
tile size and unroll factor.
Args:
tile_size: An iterable representing the tile size in each dimension.
tensor: A Tensor to which the mapping belongs.
unroll_factor: An int representing the unroll factor.
Returns:
A list whose first element is an int representing the length of the
    reuse buffer (capacity in data elements), followed by unroll_factor
    number of (start, end) tuples, where start and end are the offsets from
    the latest input of each piece of the reuse buffer.
"""
reuse_buffer = [None] # [length, (start, end), (start, end), ...]
offsets = []
for chain in _get_reuse_chains(tile_size, tensor, unroll_factor):
reuse_buffer.append((chain[0], chain[0]))
offsets.append(chain[0])
for j in range(len(chain)-1):
reuse_buffer.append((chain[j], chain[j+1]))
offsets.append(chain[j+1])
reuse_buffer[0] = max(offsets)+1
_logger.debug('reuse chains of tensor %s: %s' % (tensor.name, reuse_buffer))
return reuse_buffer
def get_indices_id(indices):
return '_'.join(str(idx).replace('-', 'm') for idx in indices)
def get_stencil_distance(stencil_window, tile_size):
return (max(soda_util.serialize_iter(stencil_window, tile_size)) +
soda_util.serialize(get_stencil_window_offset(stencil_window),
tile_size))
def get_stencil_dim(points):
dimension = len(next(iter(points)))
return [max_index-min_index+1 for max_index, min_index in zip(
[max([point[dim] for point in points]) for dim in range(dimension)],
[min([point[dim] for point in points]) for dim in range(dimension)])]
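# Worked example: for the 5-point window {(-1, 0), (1, 0), (0, 0), (0, -1),
# (0, 1)} each dimension spans max - min + 1 = 1 - (-1) + 1 = 3 elements, so
# get_stencil_dim returns [3, 3].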
_overall_stencil_window_cache = {}
def get_overall_stencil_window(input_tensor, output_tensor):
if isinstance(input_tensor, collections.Iterable):
all_points = tuple(sorted(set.union(*(
set(get_overall_stencil_window(_, output_tensor))
for _ in input_tensor))))
_logger.debug(
'overall stencil window of %s (%s) <- {%s} is %s (%d points)',
output_tensor.name, ', '.join(['0']*len(output_tensor.st_idx)),
', '.join(_.name for _ in input_tensor), all_points, len(all_points))
return all_points
# normalize store index to 0
idx = (id(input_tensor), id(output_tensor))
if idx in _overall_stencil_window_cache:
return _overall_stencil_window_cache[idx]
_logger.debug('get overall stencil window of %s <- %s',
output_tensor.name, input_tensor.name)
all_points = set()
for name, points in output_tensor.ld_indices.items():
_logger.debug('%s@%s <- %s', output_tensor.name,
util.idx2str(output_tensor.st_idx),
util.idx2str(points.values()))
if name != input_tensor.name:
recursive_points = get_overall_stencil_window(
input_tensor, output_tensor.parents[name])
_logger.debug('recursive points: %s', util.idx2str(recursive_points))
all_points |= set.union(*[{
tuple(map(lambda a, b, c: a + b - c, _, point, output_tensor.st_idx))
for _ in recursive_points} for point in points])
else:
all_points |= {tuple(map(operator.sub, point, output_tensor.st_idx))
for point in points}
all_points = tuple(sorted(all_points))
_logger.debug('overall stencil window of %s (%s) <- %s is %s (%d points)',
output_tensor.name, ', '.join(['0']*len(output_tensor.st_idx)),
input_tensor.name, all_points, len(all_points))
_overall_stencil_window_cache[idx] = all_points
return all_points
def get_stencil_window_offset(stencil_window):
# only works if window is normalized to store at 0
return tuple(-min(p[d] for p in stencil_window)
for d in range(len(next(iter(stencil_window)))))
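# Worked example: for the 5-point window {(-1, 0), (1, 0), (0, 0), (0, -1),
# (0, 1)} the minimum index is -1 in each dimension, so
# get_stencil_window_offset returns (1, 1).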
| 38.978469
| 80
| 0.647517
|
3c9c9a3ffa5829bc16a00a51fd7af33a19fbc267
| 3,639
|
py
|
Python
|
script_python/tsne.py
|
goldleaf3i/generativeCMLgraphs
|
143b7809ee9cf39a508bdbf5c91ed963b810b403
|
[
"MIT"
] | null | null | null |
script_python/tsne.py
|
goldleaf3i/generativeCMLgraphs
|
143b7809ee9cf39a508bdbf5c91ed963b810b403
|
[
"MIT"
] | null | null | null |
script_python/tsne.py
|
goldleaf3i/generativeCMLgraphs
|
143b7809ee9cf39a508bdbf5c91ed963b810b403
|
[
"MIT"
] | null | null | null |
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
# The example can be run by executing: ipython tsne.py -pylab
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
import matplotlib.cm as cm
def tsne(P = Math.array([])):
# Initialize variables
# number of instances
n = Math.size(P, 0);
# initial momentum
initial_momentum = 0.5;
# value to which momentum is changed
final_momentum = 0.8;
# iteration at which momentum is changed
mom_switch_iter = 250;
# iteration at which lying about P-values is stopped
stop_lying_iter = 100;
# maximum number of iterations
max_iter = 1000;
# initial learning rate
epsilon = 500;
# minimum gain for delta-bar-delta
min_gain = 0.01;
# Make sure P-vals are set properly
# set diagonal to zero
Math.fill_diagonal(P, 0);
# symmetrize P-values
P = 0.5 * (P + P.T);
# make sure P-values sum to one
P = Math.maximum(P / Math.sum(P[:]), 1e-12);
# constant in KL divergence
const = Math.sum(P[:] * Math.log(P[:]));
# lie about the P-vals to find better local minima
P = P * 4;
# Initialize the solution
Y = 0.0001 * Math.random.randn(n, 2);
iY = Math.zeros((n, 2));
gains = Math.ones((n, 2));
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient (faster implementation)
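        # The t-SNE gradient is dC/dy_i = 4 * sum_j (p_ij - q_ij) * (y_i - y_j)
        # / (1 + ||y_i - y_j||^2). With L = (P - Q) * num, where num holds the
        # inverse-distance terms, this becomes 4 * (diag(colsum(L)) - L) . Y;
        # column sums equal row sums here because L is symmetric.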
L = (P - Q) * num;
y_grads = Math.dot(4 * (Math.diag(Math.sum(L, 0)) - L), Y);
# update the solution
gains = (gains + 0.2) * ((y_grads > 0) != (iY > 0)) + (gains * 0.8) * ((y_grads > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = initial_momentum * iY - epsilon * (gains * y_grads);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# update the momentum if necessary
if iter == mom_switch_iter:
initial_momentum = final_momentum
if iter == stop_lying_iter:
P = P / 4;
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = const - Math.sum(P[:] * Math.log(Q[:]));
print("Iteration ", (iter + 1), ": error is ", C);
return Y;
def plotting():
cluster_labels = set(labels);
    # color iterator over a set as large as the number of clusters
colors = iter(cm.rainbow(Math.linspace(0, 1, len(cluster_labels))))
for c in cluster_labels:
Xc = [];
Yc = [];
for i in range(len(Y)):
if labels[i] == c:
Xc.append(Y[i][0]);
Yc.append(Y[i][1]);
Plot.scatter(Xc[:], Yc[:], 30, color=next(colors), label=c);
Plot.legend(scatterpoints=1, loc='lower left', fontsize=5);
Plot.savefig("tsne_scatter_plot.png");
if __name__ == "__main__":
print("tsne performs symmetric t-SNE on affinity matrix P");
P = Math.loadtxt("similarity_matrix.txt",delimiter=',');
labels = Math.loadtxt("cluster_labels.txt",delimiter=',');
Y = tsne(P);
plotting();
| 34.009346
| 109
| 0.594394
|
0f5ec81bfd32c1e6a787200ebc093f38943d11bf
| 11,065
|
py
|
Python
|
FlaskApp/routes.py
|
mikkokallio/dbapp-code
|
2df452eaef5ea375569d0076129ab65e4ae067e9
|
[
"MIT"
] | null | null | null |
FlaskApp/routes.py
|
mikkokallio/dbapp-code
|
2df452eaef5ea375569d0076129ab65e4ae067e9
|
[
"MIT"
] | null | null | null |
FlaskApp/routes.py
|
mikkokallio/dbapp-code
|
2df452eaef5ea375569d0076129ab65e4ae067e9
|
[
"MIT"
] | null | null | null |
import json
import secrets
from os import getenv
import requests
# abort() below is Flask's HTTP abort, which accepts a status code.
from flask import abort, redirect, render_template, request, session
from werkzeug.security import check_password_hash, generate_password_hash
from .app import app
from . import actions
@app.route("/")
def index():
"""Show user's profile unless user hasn't logged in."""
if "username" in session:
return redirect("/profile")
return render_template("index.html", mode="4")
@app.route("/new_place")
def new_place():
"""Show street address selector view."""
return render_template("pick_place.html")
@app.route("/search_places", methods=["POST"])
def search_places():
"""Fetch addresses based on user input."""
if "username" not in session:
return redirect("/")
location = request.form["location"]
key = getenv("MAPS_API_KEY")
url = f"https://atlas.microsoft.com/search/address/json?&subscription-key={key}&api-version=1.0&language=en-US&query={location}"
response = requests.get(url)
places = json.loads(response.text)[
"results"] if response.status_code == 200 else None
return render_template("pick_place.html", location=location, places=places)
@app.route("/add_place", methods=["POST"])
def add_place():
"""Open form for filling in information about a place."""
if "username" not in session:
return redirect("/")
address = request.form["address"]
latitude = request.form["latitude"]
longitude = request.form["longitude"]
return render_template("edit_place.html", fields={
"address": address, "latitude": latitude, "longitude": longitude})
@app.route("/save_place", methods=["POST"])
def save_place():
"""Save submitted place information."""
if "username" not in session:
return redirect("/")
if session["csrf_token"] != request.form["csrf_token"]:
abort(403)
fields = request.form
messages = actions.validate_place(fields)
if len(messages) > 0:
return render_template("edit_place.html", messages=messages, fields=fields)
messages = actions.save_place(fields)
if len(messages) == 0:
return redirect("/places")
return render_template("edit_place.html", messages=messages, fields=fields)
@app.route("/places")
def list_places():
"""Show a list of all places."""
if "username" not in session:
return redirect("/")
places = actions.get_places()
return render_template("places.html", places=places, mode="2")
@app.route("/login", methods=["POST"])
def login():
"""Log user in using password and username from form."""
username = request.form["username"]
password = request.form["password"]
user = None
try:
user = actions.get_user_by_name(username)
except AttributeError:
return render_template("index.html", mode="4",
messages=["A problem occurred while fetching user data."])
if not user:
return render_template("index.html", mode="4",
messages=["Username does not exist"])
hash_value = user.password
if check_password_hash(hash_value, password):
session["username"] = username
session["role"] = user.role
session["id"] = user.id
session["csrf_token"] = secrets.token_hex(16)
return redirect("/events")
return render_template("index.html", mode="4",
messages=["Wrong password"])
@app.route("/logout")
def logout():
"""Log user out by removing session data."""
del session["username"]
del session["role"]
del session["id"]
del session["csrf_token"]
return redirect("/")
@app.route("/profile")
def show_profile():
"""Show information about user logged in."""
if "username" not in session:
return redirect("/")
user = actions.get_user_by_name(session["username"])
my_events = actions.get_organized_events_by_user_id(session["id"])
other_events = actions.get_registered_events_by_user_id(session["id"])
my_count = len(my_events)
other_count = len(other_events)
return render_template("profile.html", user=user, my_events=my_events, mode="1",
other_events=other_events, my_count=my_count, other_count=other_count)
@app.route("/new_user")
def new_user():
"""Show form for entering a new username and password."""
return render_template("new_user.html", fields=None, mode="4")
@app.route("/edit_user")
def edit_user():
"""Show form for filling in other user information."""
if "username" not in session:
return redirect("/")
user = actions.get_user_by_name(session["username"])
return render_template("edit_user.html", user=user)
@app.route("/add_user", methods=["POST"])
def add_user():
"""Save submitted username and password."""
username = request.form["username"]
password = request.form["password"]
password2 = request.form["password2"]
messages = []
messages.extend(actions.validate_username(username))
messages.extend(actions.validate_password(password))
if password != password2:
messages.append("Passwords don't match")
if len(messages) > 0:
return render_template("new_user.html", messages=messages, fields=request.form, mode="4")
hash_value = generate_password_hash(password)
messages = actions.add_user(username, hash_value)
if len(messages) == 0:
session["username"] = username
session["role"] = "user"
session["id"] = actions.get_user_by_name(username).id
session["csrf_token"] = secrets.token_hex(16)
return render_template("edit_user.html", new_user=True, user=None, mode="4")
return render_template("new_user.html", messages=messages, fields=request.form, mode="4")
@app.route("/update_user", methods=["POST"])
def update_user():
"""Save submitted user information."""
if "username" not in session:
return redirect("/")
if session["csrf_token"] != request.form["csrf_token"]:
abort(403)
date_of_birth = request.form["date_of_birth"]
gender = request.form["gender"]
description = request.form["description"]
mode = request.form["mode"]
messages = actions.validate_user(request.form)
if len(messages) > 0:
return render_template("edit_user.html", messages=messages, user=request.form, mode=mode)
messages = actions.update_user(
session["username"], date_of_birth, gender, description)
if len(messages) > 0:
return render_template("edit_user.html", messages=messages, user=request.form, mode=mode)
return redirect("/profile")
@app.route("/new_event")
def new_event():
"""Open form to create new event from scratch."""
if "username" not in session:
return redirect("/")
places = actions.get_places()
return render_template("edit_event.html", fields=None, places=places, id="")
@app.route("/edit_event", methods=["POST"])
def edit_event():
"""Open event form for editing."""
if "username" not in session:
return redirect("/")
if not request.form["event_id"]:
return redirect("/events")
id = request.form["event_id"]
event = actions.get_event_by_id(id)
places = actions.get_places()
if session["username"] != event.username:
return redirect("/events")
return render_template("edit_event.html", fields=event, places=places, id=id)
@app.route("/delete_event", methods=["POST"])
def del_event():
"""Delete event using id from currently viewed event."""
if "username" not in session:
return redirect("/")
if not request.form["event_id"]:
return redirect("/events")
id = request.form["event_id"]
messages = actions.delete_event_by_id(id, session["id"])
events = actions.get_upcoming_events()
past_events = actions.get_past_events()
return render_template("events.html", count=len(events), events=events, past_events=past_events,
events_view=True, messages=messages)
@app.route("/add_event", methods=["POST"])
def update_event():
"""Save event form fields."""
if "username" not in session:
return redirect("/")
if session["csrf_token"] != request.form["csrf_token"]:
abort(403)
fields = request.form
places = actions.get_places()
messages = actions.validate_event(fields)
if len(messages) > 0:
return render_template("edit_event.html", messages=messages, fields=fields,
places=places, id=fields["event_id"])
messages = actions.upsert_event(session["id"], fields)
if len(messages) > 0:
return render_template("edit_event.html", messages=messages, fields=fields,
places=places, id=fields["event_id"])
return redirect("/events")
@app.route("/events")
def list_events():
"""Show a view with past and future events."""
if "username" not in session:
return redirect("/")
events = actions.get_upcoming_events()
past_events = actions.get_past_events()
return render_template("events.html", count=len(events), past_count=len(past_events),
events=events, past_events=past_events, events_view=True, mode="3")
@app.route("/event/<int:id>")
def show_event(id):
"""Show more detailed information about one event."""
if "username" not in session:
return redirect("/")
event = actions.get_event_by_id(id)
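    # event.location is stored as a wrapped coordinate pair (e.g. "(lat,lng)"); strip the outer
    # characters and re-wrap as "[lat,lng]" so the template can hand it to the maps script.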
pos = f"[{event.location[1:-1]}]"
comments = actions.get_comments_by_event_id(id)
signups = actions.get_signups_by_event_id(id)
going = len(signups)
past = actions.is_past_event(event)
if session["username"] == event.username:
user_going = True
else:
user_going = actions.get_signup_by_id(id, session["id"])
return render_template("event.html", id=id, unit=event, comments=comments, signups=signups,
pos=pos, going=going, user_going=user_going, past=past,
key=getenv("MAPS_API_KEY"))
@app.route("/write_comment", methods=["POST"])
def send_comment():
"""Save a new comment."""
if "username" not in session:
return redirect("/")
if session["csrf_token"] != request.form["csrf_token"]:
abort(403)
event_id = request.form["event_id"]
comment = request.form["comment"]
if len(comment) > 0:
actions.send_comment(event_id, session["id"], comment)
return redirect(f"/event/{event_id}")
@app.route("/signup", methods=["POST"])
def sign_up():
"""Save registration to event or remove it."""
if "username" not in session:
return redirect("/")
if session["csrf_token"] != request.form["csrf_token"]:
abort(403)
event_id = request.form["event_id"]
max_people = request.form["max_people"]
signups = len(actions.get_signups_by_event_id(event_id))
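    # Toggle the user's registration; the current signup count and max_people are passed on so
    # the actions layer can decide whether another registration still fits.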
actions.add_or_remove_signup(event_id, session["id"], max_people, signups)
return redirect(f"/event/{event_id}")
| 34.256966 | 132 | 0.658653 |
| 6220e1dad304508f4bc3bde73f96e72d974db339 | 33,716 | py | Python
| src/sage/algebras/quantum_matrix_coordinate_algebra.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 3 | 2019-07-15T13:48:24.000Z | 2019-11-08T12:31:43.000Z
| src/sage/algebras/quantum_matrix_coordinate_algebra.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z
| src/sage/algebras/quantum_matrix_coordinate_algebra.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 1 | 2020-07-23T10:29:58.000Z | 2020-07-23T10:29:58.000Z |
r"""
Quantum Matrix Coordinate Algebras
AUTHORS:
- Travis Scrimshaw (01-2016): initial version
"""
##############################################################################
# Copyright (C) 2016 Travis Scrimshaw <tscrimsh at umn.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
##############################################################################
from six.moves import range
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.sets.family import Family
from sage.categories.algebras import Algebras
from sage.categories.bialgebras import Bialgebras
from sage.categories.hopf_algebras import HopfAlgebras
from sage.combinat.free_module import CombinatorialFreeModule
from sage.monoids.indexed_free_monoid import IndexedFreeAbelianMonoid
from sage.rings.polynomial.laurent_polynomial_ring import LaurentPolynomialRing
from sage.rings.all import ZZ
class QuantumMatrixCoordinateAlgebra_abstract(CombinatorialFreeModule):
"""
Abstract base class for quantum coordinate algebras of a set
of matrices.
"""
@staticmethod
def __classcall__(cls, q=None, bar=None, R=None, **kwds):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: R.<q> = LaurentPolynomialRing(ZZ)
sage: O1 = algebras.QuantumMatrixCoordinate(4)
sage: O2 = algebras.QuantumMatrixCoordinate(4, 4, q=q)
sage: O3 = algebras.QuantumMatrixCoordinate(4, R=ZZ)
sage: O4 = algebras.QuantumMatrixCoordinate(4, R=R, q=q)
sage: O1 is O2 and O2 is O3 and O3 is O4
True
sage: O5 = algebras.QuantumMatrixCoordinate(4, R=QQ)
sage: O1 is O5
False
"""
if R is None:
R = ZZ
else:
if q is not None:
q = R(q)
if q is None:
q = LaurentPolynomialRing(R, 'q').gen()
return super(QuantumMatrixCoordinateAlgebra_abstract,
cls).__classcall__(cls,
q=q, bar=bar, R=q.parent(), **kwds)
def __init__(self, gp_indices, n, q, bar, R, category, indices_key=None):
"""
Initialize ``self``.
TESTS::
sage: O = algebras.QuantumMatrixCoordinate(3, 2)
sage: TestSuite(O).run()
"""
self._n = n
self._q = q
if bar is None:
def bar(x):
return x.subs(q=~self._q)
self._bar = bar
if indices_key is None:
indices = IndexedFreeAbelianMonoid(gp_indices)
else:
indices = IndexedFreeAbelianMonoid(gp_indices, sorting_key=indices_key)
CombinatorialFreeModule.__init__(self, R, indices, category=category)
def _repr_term(self, m):
r"""
Return a string representation of the term indexed by ``m``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: I = O.indices()
sage: x = I.an_element(); x
F[(1, 1)]^2*F[(1, 2)]^2*F[(1, 3)]^3
sage: O._repr_term(x)
'x[1,1]^2*x[1,2]^2*x[1,3]^3'
sage: O._repr_term(I.one())
'1'
sage: O.q() * O.one()
q
"""
S = m._sorted_items()
if not S:
return '1'
def exp(e):
return '^{}'.format(e) if e > 1 else ''
return '*'.join(('x[{},{}]'.format(*k) if k != 'c' else 'c') + exp(e)
for k, e in m._sorted_items())
def _latex_term(self, m):
r"""
Return a latex representation of the term indexed by ``m``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: I = O.indices()
sage: x = I.an_element(); x
F[(1, 1)]^2*F[(1, 2)]^2*F[(1, 3)]^3
sage: O._latex_term(x)
'x_{1,1}^{2} x_{1,2}^{2} x_{1,3}^{3}'
sage: O._latex_term(I.one())
'1'
sage: latex(O.q() * O.one())
q
"""
S = m._sorted_items()
if not S:
return '1'
def exp(e):
return '^{{{}}}'.format(e) if e > 1 else ''
return ' '.join(('x_{{{},{}}}'.format(*k) if k != 'c' else 'c') + exp(e)
for k, e in m._sorted_items())
def n(self):
"""
Return the value `n`.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: O.n()
4
sage: O = algebras.QuantumMatrixCoordinate(4, 6)
sage: O.n()
6
"""
return self._n
def q(self):
"""
Return the variable ``q``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: O.q()
q
sage: O.q().parent()
Univariate Laurent Polynomial Ring in q over Integer Ring
sage: O.q().parent() is O.base_ring()
True
"""
return self._q
@cached_method
def one_basis(self):
"""
Return the basis element indexing `1`.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: O.one_basis()
1
sage: O.one()
1
TESTS::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: O.one_basis() == O.indices().one()
True
"""
return self._indices.one()
@cached_method
def gens(self):
r"""
Return the generators of ``self`` as a tuple.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(3)
sage: O.gens()
(x[1,1], x[1,2], x[1,3],
x[2,1], x[2,2], x[2,3],
x[3,1], x[3,2], x[3,3])
"""
return tuple(self.algebra_generators())
@cached_method
def quantum_determinant(self):
r"""
Return the quantum determinant of ``self``.
The quantum determinant is defined by
.. MATH::
\det_q = \sum_{\sigma \in S_n} (-q)^{\ell(\sigma)}
x_{1, \sigma(1)} x_{2, \sigma(2)} \cdots x_{n, \sigma(n)}.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(2)
sage: O.quantum_determinant()
x[1,1]*x[2,2] - q*x[1,2]*x[2,1]
We verify that the quantum determinant is central::
sage: for n in range(2,5):
....: O = algebras.QuantumMatrixCoordinate(n)
....: qdet = O.quantum_determinant()
....: assert all(g * qdet == qdet * g for g in O.algebra_generators())
We also verify that it is group-like::
sage: for n in range(2,4):
....: O = algebras.QuantumMatrixCoordinate(n)
....: qdet = O.quantum_determinant()
....: assert qdet.coproduct() == tensor([qdet, qdet])
"""
if hasattr(self, '_m') and self._m != self._n:
raise ValueError("undefined for non-square quantum matrices")
from sage.combinat.permutation import Permutations
q = self._q
return self.sum(self.term(self._indices({(i, p(i)): 1 for i in range(1, self._n + 1)}),
(-q) ** p.length())
for p in Permutations(self._n))
def product_on_basis(self, a, b):
"""
Return the product of basis elements indexed by ``a`` and ``b``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: x = O.algebra_generators()
sage: b = x[1,4] * x[2,1] * x[3,4] # indirect doctest
sage: b * (b * b) == (b * b) * b
True
sage: p = prod(list(O.algebra_generators())[:10])
sage: p * (p * p) == (p * p) * p # long time
True
sage: x = O.an_element()
sage: y = x^2 + x[4,4] * x[3,3] * x[1,2]
sage: z = x[2,2] * x[1,4] * x[3,4] * x[1,1]
sage: x * (y * z) == (x * y) * z
True
"""
al = a._sorted_items()
bl = b._sorted_items()
# Check for multiplication by 1
if not al:
return self.monomial(b)
if not bl:
return self.monomial(a)
if al[-1][0] < bl[0][0]: # Already in order
return self.monomial(a * b)
G = self._indices.monoid_generators()
one = self.base_ring().one()
ret = self.zero()
q = self._q
qi = q ** -1
monomial = b
coeff = one
for pos in range(len(al) - 1, -1, -1):
ax, ae = al[pos]
for bx, be in bl:
if ax[0] < bx[0]:
# In order, so nothing more to do
break
elif ax[0] == bx[0]:
if ax[1] > bx[1]:
                        # x_{it} x_{ij} = q^{-1} x_{ij} x_{it} if t > j
coeff *= qi ** (ae * be)
else:
# In order, so nothing more to do
break
elif ax[1] == bx[1]:
# x_{sj} x_{ij} = q^{-1} x_{ij} x_{sj} if s > i
coeff *= qi ** (ae * be)
elif ax[1] > bx[1]: # By this point, we must have ax[0] > bx[0]
# x_{st} x_{ij} = x_{ij} x_{st} + (q^-1 - q) x_{it} x_{sj}
# if s > i, t > j
# By Lemma 2.7 (with fixed typo) in H. Zhang and R.B. Zhang:
# x_{st} x_{ij}^k = x_{ij}^k x_{st}
# + (q^{1-2k} - q) x_{ij}^{k-1} x_{it} x_{sj}
m1 = G[bx] ** be * G[ax]
m2 = G[bx] ** (be - 1) * G[(bx[0], ax[1])] * G[(ax[0], bx[1])]
ret = self._from_dict({m1: one, m2: (q ** (1 - 2 * be) - q)})
ml = monomial._sorted_items()
index = ml.index((bx, be))
a_key = self._indices(dict(al[:pos]))
bp_key = self._indices(dict(ml[:index])) * G[ax] ** (ae - 1)
return (self.monomial(a_key) *
self.monomial(bp_key) *
ret *
self.term(self._indices(dict(ml[index + 1:])),
coeff))
                # Otherwise ax[1] < bx[1], but for this case they commute:
# x_{st} x_{ij} = x_{ij} x_{st} if s > i, t < j
# So there is nothing to do to coeff
monomial *= G[ax] ** ae
return self.term(monomial, coeff)
@cached_method
def _bar_on_basis(self, x):
"""
Return the bar involution on the basis element indexed by ``x``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: O._bar_on_basis(O._indices.an_element())
(q^-16)*x[1,1]^2*x[1,2]^2*x[1,3]^3
"""
ret = self.one()
for k, e in reversed(x._sorted_items()):
ret *= self.monomial(self._indices({k: e}))
return ret
def counit_on_basis(self, x):
r"""
Return the counit on the basis element indexed by ``x``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: G = O.algebra_generators()
sage: I = [1,2,3,4]
sage: matrix([[G[i,j].counit() for i in I] for j in I]) # indirect doctest
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
"""
if all(t == 'c' or t[0] == t[1] for t, e in x._sorted_items()):
return self.base_ring().one()
else:
return self.base_ring().zero()
class Element(CombinatorialFreeModule.Element):
"""
An element of a quantum matrix coordinate algebra.
"""
def bar(self):
r"""
Return the image of ``self`` under the bar involution.
The bar involution is the `\QQ`-algebra anti-automorphism
            defined by `x_{ij} \mapsto x_{ij}` and `q \mapsto q^{-1}`.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: x = O.an_element()
sage: x.bar()
1 + 2*x[1,1] + (q^-16)*x[1,1]^2*x[1,2]^2*x[1,3]^3 + 3*x[1,2]
sage: x = O.an_element() * O.algebra_generators()[2,4]; x
x[1,1]^2*x[1,2]^2*x[1,3]^3*x[2,4] + 2*x[1,1]*x[2,4]
+ 3*x[1,2]*x[2,4] + x[2,4]
sage: xb = x.bar(); xb
(q^-16)*x[1,1]^2*x[1,2]^2*x[1,3]^3*x[2,4]
+ (q^-21-q^-15)*x[1,1]^2*x[1,2]^2*x[1,3]^2*x[1,4]*x[2,3]
+ (q^-22-q^-18)*x[1,1]^2*x[1,2]*x[1,3]^3*x[1,4]*x[2,2]
+ (q^-24-q^-20)*x[1,1]*x[1,2]^2*x[1,3]^3*x[1,4]*x[2,1]
+ 2*x[1,1]*x[2,4] + 3*x[1,2]*x[2,4]
+ (2*q^-1-2*q)*x[1,4]*x[2,1]
+ (3*q^-1-3*q)*x[1,4]*x[2,2] + x[2,4]
sage: xb.bar() == x
True
"""
P = self.parent()
return P.sum(P._bar(c) * P._bar_on_basis(m) for m, c in self)
class QuantumMatrixCoordinateAlgebra(QuantumMatrixCoordinateAlgebra_abstract):
r"""
A quantum matrix coordinate algebra.
Let `R` be a commutative ring. The quantum matrix coordinate algebra
of `M(m, n)` is the associative algebra over `R[q, q^{-1}]`
generated by `x_{ij}`, for `i = 1, 2, \ldots, m`, `j = 1, 2, \ldots, n`,
and subject to the following relations:
.. MATH::
\begin{array}{ll}
x_{it} x_{ij} = q^{-1} x_{ij} x_{it} & \text{if } j < t, \\
x_{sj} x_{ij} = q^{-1} x_{ij} x_{sj} & \text{if } i < s, \\
x_{st} x_{ij} = x_{ij} x_{st} & \text{if } i < s, j > t, \\
x_{st} x_{ij} = x_{ij} x_{st} + (q^{-1} - q) x_{it} x_{sj}
& \text{if } i < s, j < t. \\
\end{array}
The quantum matrix coordinate algebra is denoted by
`\mathcal{O}_q(M(m, n))`. For `m = n`, it is also a bialgebra given by
.. MATH::
\Delta(x_{ij}) = \sum_{k=1}^n x_{ik} \otimes x_{kj},
\varepsilon(x_{ij}) = \delta_{ij}.
Moreover, there is a central group-like element called the
*quantum determinant* that is defined by
.. MATH::
\det_q = \sum_{\sigma \in S_n} (-q)^{\ell(\sigma)}
x_{1,\sigma(1)} x_{2,\sigma(2)} \cdots x_{n,\sigma(n)}.
The quantum matrix coordinate algebra also has natural inclusions
when restricting to submatrices. That is, let
`I \subseteq \{1, 2, \ldots, m\}` and `J \subseteq \{1, 2, \ldots, n\}`.
Then the subalgebra generated by `\{ x_{ij} \mid i \in I, j \in J \}`
is naturally isomorphic to `\mathcal{O}_q(M(|I|, |J|))`.
.. NOTE::
The `q` considered here is `q^2` in some references, e.g., [ZZ2005]_.
INPUT:
- ``m`` -- the integer `m`
- ``n`` -- the integer `n`
- ``R`` -- (optional) the ring `R` if `q` is not specified
(the default is `\ZZ`); otherwise the ring containing `q`
- ``q`` -- (optional) the variable `q`; the default is
`q \in R[q, q^{-1}]`
- ``bar`` -- (optional) the involution on the base ring; the
default is `q \mapsto q^{-1}`
EXAMPLES:
We construct `\mathcal{O}_q(M(2,3))` and the variables::
sage: O = algebras.QuantumMatrixCoordinate(2,3)
sage: O.inject_variables()
Defining x11, x12, x13, x21, x22, x23
We do some basic computations::
sage: x21 * x11
(q^-1)*x[1,1]*x[2,1]
sage: x23 * x12 * x11
(q^-1)*x[1,1]*x[1,2]*x[2,3] + (q^-2-1)*x[1,1]*x[1,3]*x[2,2]
+ (q^-3-q^-1)*x[1,2]*x[1,3]*x[2,1]
We construct the maximal quantum minors::
sage: q = O.q()
sage: qm12 = x11*x22 - q*x12*x21
sage: qm13 = x11*x23 - q*x13*x21
sage: qm23 = x12*x23 - q*x13*x22
However, unlike for the quantum determinant, they are not central::
sage: all(qm12 * g == g * qm12 for g in O.algebra_generators())
False
sage: all(qm13 * g == g * qm13 for g in O.algebra_generators())
False
sage: all(qm23 * g == g * qm23 for g in O.algebra_generators())
False
REFERENCES:
- [FRT1990]_
- [ZZ2005]_
"""
@staticmethod
def __classcall_private__(cls, m, n=None, q=None, bar=None, R=None):
r"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: R.<q> = LaurentPolynomialRing(ZZ)
sage: O1 = algebras.QuantumMatrixCoordinate(4)
sage: O2 = algebras.QuantumMatrixCoordinate(4, 4, q=q)
sage: O3 = algebras.QuantumMatrixCoordinate(4, R=ZZ)
sage: O4 = algebras.QuantumMatrixCoordinate(4, R=R, q=q)
sage: O1 is O2 and O2 is O3 and O3 is O4
True
sage: O5 = algebras.QuantumMatrixCoordinate(4, R=QQ)
sage: O1 is O5
False
"""
if n is None:
n = m
return super(QuantumMatrixCoordinateAlgebra, cls).__classcall__(cls, m=m, n=n,
q=q, bar=bar,
R=R)
def __init__(self, m, n, q, bar, R):
"""
Initialize ``self``.
TESTS::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: TestSuite(O).run()
"""
gp_indices = [(i, j) for i in range(1, m + 1) for j in range(1, n + 1)]
if m == n:
cat = Bialgebras(R.category()).WithBasis()
else:
cat = Algebras(R.category()).WithBasis()
self._m = m
QuantumMatrixCoordinateAlgebra_abstract.__init__(self, gp_indices, n, q, bar, R, cat)
# Set the names
names = ['x{}{}'.format(*k) for k in gp_indices]
self._assign_names(names)
def _repr_(self):
r"""
Return a string representation of ``self``.
EXAMPLES::
sage: algebras.QuantumMatrixCoordinate(4)
Quantized coordinate algebra of M(4, 4) with q=q over
Univariate Laurent Polynomial Ring in q over Integer Ring
sage: algebras.QuantumMatrixCoordinate(4, 2)
Quantized coordinate algebra of M(4, 2) with q=q over
Univariate Laurent Polynomial Ring in q over Integer Ring
"""
txt = "Quantized coordinate algebra of M({}, {}) with q={} over {}"
return txt.format(self._m, self._n, self._q, self.base_ring())
def _latex_(self):
r"""
Return a latex representation of ``self``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: latex(O)
\mathcal{O}_{q}(M(4, 4))
"""
return "\\mathcal{O}_{%s}(M(%s, %s))" % (self._q, self._m, self._n)
def m(self):
"""
Return the value `m`.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4, 6)
sage: O.m()
4
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: O.m()
4
"""
return self._m
@cached_method
def algebra_generators(self):
"""
Return the algebra generators of ``self``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(2)
sage: O.algebra_generators()
Finite family {(1, 1): x[1,1], (1, 2): x[1,2], (2, 1): x[2,1], (2, 2): x[2,2]}
"""
l = [(i, j) for i in range(1, self._m + 1)
for j in range(1, self._n + 1)]
G = self._indices.monoid_generators()
one = self.base_ring().one()
return Family(l, lambda x: self.element_class(self, {G[x]: one}))
def coproduct_on_basis(self, x):
r"""
Return the coproduct on the basis element indexed by ``x``.
EXAMPLES::
sage: O = algebras.QuantumMatrixCoordinate(4)
sage: x24 = O.algebra_generators()[2,4]
sage: O.coproduct_on_basis(x24.leading_support())
x[2,1] # x[1,4] + x[2,2] # x[2,4] + x[2,3] # x[3,4] + x[2,4] # x[4,4]
TESTS:
We check that it is an algebra morphism::
sage: O = algebras.QuantumMatrixCoordinate(3)
sage: G = O.algebra_generators()
sage: all(x.coproduct() * y.coproduct() == (x * y).coproduct()
....: for x in G for y in G)
True
"""
if self._m != self._n:
raise ValueError("undefined for non-square quantum matrices")
T = self.tensor_square()
I = self._indices.monoid_generators()
return T.prod(T.sum_of_monomials((I[t[0], k], I[k, t[1]])
for k in range(1, self._n + 1)) ** e
for t, e in x._sorted_items())
class QuantumGL(QuantumMatrixCoordinateAlgebra_abstract):
r"""
Quantum coordinate algebra of `GL(n)`.
The quantum coordinate algebra of `GL(n)`, or quantum `GL(n)`
for short and denoted by `\mathcal{O}_q(GL(n))`, is the quantum
coordinate algebra of `M_R(n, n)` with the addition of the
additional central group-like element `c` which satisfies
`c d = d c = 1`, where `d` is the quantum determinant.
Quantum `GL(n)` is a Hopf algebra where `\varepsilon(c) = 1`
and the antipode `S` is given by the (quantum) matrix inverse.
    That is to say, we have `S(c) = c^{-1} = d` and
    .. MATH::
        S(x_{ij}) = c (-q)^{i-j} \tilde{t}_{ji},
where we have the quantum minor
.. MATH::
\tilde{t}_{ij} = \sum_{\sigma} (-q)^{\ell(\sigma)}
x_{1, \sigma(1)} \cdots x_{i-1, \sigma(i-1)} x_{i+1, \sigma(i+1)}
\cdots x_{n, \sigma(n)}
with the sum over permutations `\sigma \colon \{1, \ldots, i-1, i+1,
    \ldots, n\} \to \{1, \ldots, j-1, j+1, \ldots, n\}`.
.. SEEALSO::
:class:`QuantumMatrixCoordinateAlgebra`
INPUT:
- ``n`` -- the integer `n`
- ``R`` -- (optional) the ring `R` if `q` is not specified
(the default is `\ZZ`); otherwise the ring containing `q`
- ``q`` -- (optional) the variable `q`; the default is
`q \in R[q, q^{-1}]`
- ``bar`` -- (optional) the involution on the base ring; the
default is `q \mapsto q^{-1}`
EXAMPLES:
We construct `\mathcal{O}_q(GL(3))` and the variables::
sage: O = algebras.QuantumGL(3)
sage: O.inject_variables()
Defining x11, x12, x13, x21, x22, x23, x31, x32, x33, c
We do some basic computations::
sage: x33 * x12
x[1,2]*x[3,3] + (q^-1-q)*x[1,3]*x[3,2]
sage: x23 * x12 * x11
(q^-1)*x[1,1]*x[1,2]*x[2,3] + (q^-2-1)*x[1,1]*x[1,3]*x[2,2]
+ (q^-3-q^-1)*x[1,2]*x[1,3]*x[2,1]
sage: c * O.quantum_determinant()
1
We verify the quantum determinant is in the center and is group-like::
sage: qdet = O.quantum_determinant()
sage: all(qdet * g == g * qdet for g in O.algebra_generators())
True
sage: qdet.coproduct() == tensor([qdet, qdet])
True
We check that the inverse of the quantum determinant is also in
the center and group-like::
sage: all(c * g == g * c for g in O.algebra_generators())
True
sage: c.coproduct() == tensor([c, c])
True
Moreover, the antipode interchanges the quantum determinant and
its inverse::
sage: c.antipode() == qdet
True
sage: qdet.antipode() == c
True
REFERENCES:
.. [DD91] \R. Dipper and S. Donkin. *Quantum* `GL_n`.
Proc. London Math. Soc. (3) **63** (1991), no. 1, pp. 165-211.
.. [Karimipour93] Vahid Karimipour.
*Representations of the coordinate ring of* `GL_q(n)`.
(1993). :arxiv:`hep-th/9306058`.
"""
@staticmethod
def __classcall_private__(cls, n, q=None, bar=None, R=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: R.<q> = LaurentPolynomialRing(ZZ)
sage: O1 = algebras.QuantumGL(4)
sage: O2 = algebras.QuantumGL(4, R=ZZ)
sage: O3 = algebras.QuantumGL(4, R=R, q=q)
sage: O1 is O2 and O2 is O3
True
sage: O4 = algebras.QuantumGL(4, R=QQ)
sage: O1 is O4
False
"""
return super(QuantumGL, cls).__classcall__(cls, n=n, q=q, bar=bar, R=R)
def __init__(self, n, q, bar, R):
"""
Initialize ``self``.
TESTS::
sage: O = algebras.QuantumGL(2)
sage: elts = list(O.algebra_generators())
sage: elts += [O.quantum_determinant(), O.an_element()]
sage: TestSuite(O).run(elements=elts) # long time
"""
# Set the names
gp_indices = [(i, j) for i in range(1, n + 1) for j in range(1, n + 1)]
gp_indices.append('c')
cat = HopfAlgebras(R.category()).WithBasis()
QuantumMatrixCoordinateAlgebra_abstract.__init__(self, gp_indices, n, q,
bar, R, cat,
indices_key=_generator_key)
names = ['x{}{}'.format(*k) for k in gp_indices[:-1]]
names.append('c')
self._assign_names(names)
def _repr_(self):
r"""
Return a string representation of ``self``.
EXAMPLES::
sage: algebras.QuantumGL(4)
Quantized coordinate algebra of GL(4) with q=q over
Univariate Laurent Polynomial Ring in q over Integer Ring
"""
txt = "Quantized coordinate algebra of GL({}) with q={} over {}"
return txt.format(self._n, self._q, self.base_ring())
def _latex_(self):
r"""
Return a latex representation of ``self``.
EXAMPLES::
sage: O = algebras.QuantumGL(4)
sage: latex(O)
\mathcal{O}_{q}(GL(4))
"""
return "\\mathcal{O}_{%s}(GL(%s))" % (self._q, self._n)
@cached_method
def algebra_generators(self):
"""
Return the algebra generators of ``self``.
EXAMPLES::
sage: O = algebras.QuantumGL(2)
sage: O.algebra_generators()
Finite family {(1, 1): x[1,1], (1, 2): x[1,2], (2, 1): x[2,1], (2, 2): x[2,2], 'c': c}
"""
l = [(i, j) for i in range(1, self._n + 1)
for j in range(1, self._n + 1)]
l.append('c')
G = self._indices.monoid_generators()
one = self.base_ring().one()
return Family(l, lambda x: self.element_class(self, {G[x]: one}))
@lazy_attribute
def _qdet_cancel_monomial(self):
"""
Return the trailing monomial of the quantum determinant.
EXAMPLES::
sage: O = algebras.QuantumGL(2)
sage: O._qdet_cancel_monomial
F[(1, 1)]*F[(2, 2)]
"""
I = self._indices
gens = I.monoid_generators()
return I.prod(gens[i, i] for i in range(1, self._n + 1))
@lazy_attribute
def _qdet_remaining(self):
r"""
Return the remaining terms when cancelling the leading term.
Consider `d = m + L`, where `m` is the leading term of the
quantum determinant `d`. Then we have `c d = cm + cL = 1`,
which we rewrite as `cm = 1 - cL`. This lazy attribute
is `1 - cL`.
EXAMPLES::
sage: O = algebras.QuantumGL(2)
sage: O._qdet_remaining
1 + q*c*x[1,2]*x[2,1]
"""
temp = self.monomial(self._qdet_cancel_monomial) - self.quantum_determinant()
c = self._indices.monoid_generators()['c']
ret = {c * mon: coeff for mon, coeff in temp}
return self._from_dict(ret, remove_zeros=False) + self.one()
def product_on_basis(self, a, b):
r"""
Return the product of basis elements indexed by ``a`` and ``b``.
EXAMPLES::
sage: O = algebras.QuantumGL(2)
sage: I = O.indices().monoid_generators()
sage: O.product_on_basis(I[1,1], I[2,2])
x[1,1]*x[2,2]
sage: O.product_on_basis(I[2,2], I[1,1])
x[1,1]*x[2,2] + (q^-1-q)*x[1,2]*x[2,1]
TESTS::
sage: x11,x12,x21,x22,c = O.algebra_generators()
sage: x11 * x22
x[1,1]*x[2,2]
sage: x22 * x12
(q^-1)*x[1,2]*x[2,2]
sage: x22 * x11
x[1,1]*x[2,2] + (q^-1-q)*x[1,2]*x[2,1]
sage: c * (x11 * O.quantum_determinant())
x[1,1]
"""
I = self._indices
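        # The central element c commutes with everything, so strip any powers of c off of
        # ``a`` and ``b`` first, multiply the c-free parts, and reattach (or cancel against
        # the quantum determinant) the collected power of c below.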
c_exp = 0
if 'c' in a._monomial:
da = dict(a._monomial) # Make a copy
c_exp += da.pop('c')
a = I(da)
if 'c' in b._monomial:
db = dict(b._monomial) # Make a copy
c_exp += db.pop('c')
b = I(db)
# a and b contain no powers of c
p = super(QuantumGL, self).product_on_basis(a, b)
if c_exp == 0:
return p
c = self._indices.monoid_generators()['c']
ret = {}
other = self.zero()
for mon, coeff in p:
try:
# Given that cz = R and we have a monomial ab, we need to
# rewrite zx in terms of ab plus lower order terms L:
# zx = X * ab + L
# c * zx = R * x = c * X * ab + c * L
# c * ab = (R * x - c * L) / X
rem = self.monomial(mon // self._qdet_cancel_monomial)
L = self.monomial(self._qdet_cancel_monomial) * rem
co = L[mon]
del L._monomial_coefficients[mon]
temp = self.term(c ** (c_exp - 1), coeff) * self._qdet_remaining * rem
if L != self.zero():
temp -= self.term(c ** c_exp, coeff) * L
for k in temp._monomial_coefficients:
temp._monomial_coefficients[k] //= co
other += temp
except ValueError: # We cannot cancel, so we just add on the correct power of c
ret[c ** c_exp * mon] = coeff
return self._from_dict(ret, remove_zeros=False) + other
@cached_method
def _antipode_on_generator(self, i, j):
"""
Return the antipode on the generator indexed by ``(i, j)``.
EXAMPLES::
sage: O = algebras.QuantumGL(2)
sage: [[O._antipode_on_generator(i, j) for i in [1,2]] for j in [1,2]]
[[c*x[2,2], -q*c*x[2,1]],
[-(q^-1)*c*x[1,2], c*x[1,1]]]
"""
from sage.combinat.permutation import Permutations
q = self._q
I = list(range(1, j)) + list(range(j + 1, self._n + 1))
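        # Build the quantum minor \tilde{t}_{ji}: the first indices run over {1,...,n} \ {j}
        # and the second indices (via ``lift``) over {1,...,n} \ {i}, each permutation term
        # weighted by (-q)^{length} and multiplied by the central element c.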
def lift(p):
return [val if val < i else val + 1 for val in p]
gens = self.algebra_generators()
t_tilde = self.sum((-q) ** p.length() * gens['c'] *
self.prod(gens[I[k], val]
for k, val in enumerate(lift(p)))
for p in Permutations(self._n - 1))
return (-q) ** (i - j) * t_tilde
def antipode_on_basis(self, x):
r"""
Return the antipode of the basis element indexed by ``x``.
EXAMPLES::
sage: O = algebras.QuantumGL(3)
sage: x = O.indices().monoid_generators()
sage: O.antipode_on_basis(x[1,2])
-(q^-1)*c*x[1,2]*x[3,3] + c*x[1,3]*x[3,2]
sage: O.antipode_on_basis(x[2,2])
c*x[1,1]*x[3,3] - q*c*x[1,3]*x[3,1]
sage: O.antipode_on_basis(x['c']) == O.quantum_determinant()
True
"""
ret = self.one()
for k, e in reversed(x._sorted_items()):
if k == 'c':
ret *= self.quantum_determinant() ** e
else:
ret *= self._antipode_on_generator(*k) ** e
return ret
def coproduct_on_basis(self, x):
r"""
Return the coproduct on the basis element indexed by ``x``.
EXAMPLES::
sage: O = algebras.QuantumGL(3)
sage: x = O.indices().monoid_generators()
sage: O.coproduct_on_basis(x[1,2])
x[1,1] # x[1,2] + x[1,2] # x[2,2] + x[1,3] # x[3,2]
sage: O.coproduct_on_basis(x[2,2])
x[2,1] # x[1,2] + x[2,2] # x[2,2] + x[2,3] # x[3,2]
sage: O.coproduct_on_basis(x['c'])
c # c
"""
T = self.tensor_square()
I = self._indices.monoid_generators()
return T.prod(T.sum_of_monomials((I[t[0], k], I[k, t[1]])
for k in range(1, self._n + 1)) ** e
if t != 'c' else T.monomial((I['c'], I['c'])) ** e
for t, e in x._sorted_items())
def _generator_key(t):
"""
    Helper function to make ``'c'`` less than all other indices for
sorting the monomials in :class:`QuantumGL`.
EXAMPLES::
sage: from sage.algebras.quantum_matrix_coordinate_algebra import _generator_key as k
sage: k((1,2)) < k('c')
False
sage: k((1,2)) < k((1,3))
True
sage: k((1,2)) < k((3,1))
True
sage: k('c') < k((1,1))
True
"""
if isinstance(t, tuple):
return t
return ()
| 33.953676 | 98 | 0.495195 |
| acf0996ff1266d915aeabb9ef3a8ff306d1e9121 | 43,346 | py | Python
| cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py | ethanga12/cookbooktbd | bc310546f4b05d29a24eff79242c252a086d7260 | ["Apache-2.0"] | 1 | 2021-01-15T18:00:01.000Z | 2021-01-15T18:00:01.000Z
| cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py | ethanga12/cookbooktbd | bc310546f4b05d29a24eff79242c252a086d7260 | ["Apache-2.0"] | null | null | null
| cookbook/lib/python3.7/site-packages/google/cloud/firestore_admin_v1/proto/operation_pb2.py | ethanga12/cookbooktbd | bc310546f4b05d29a24eff79242c252a086d7260 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_admin_v1/proto/operation.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.firestore_admin_v1.proto import (
index_pb2 as google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2,
)
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/firestore_admin_v1/proto/operation.proto",
package="google.firestore.admin.v1",
syntax="proto3",
serialized_options=b"\n\035com.google.firestore.admin.v1B\016OperationProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1\352\002#Google::Cloud::Firestore::Admin::V1",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n5google/cloud/firestore_admin_v1/proto/operation.proto\x12\x19google.firestore.admin.v1\x1a\x31google/cloud/firestore_admin_v1/proto/index.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xbd\x02\n\x16IndexOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05index\x18\x03 \x01(\t\x12\x38\n\x05state\x18\x04 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress"\x88\x05\n\x16\x46ieldOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05\x66ield\x18\x03 \x01(\t\x12_\n\x13index_config_deltas\x18\x04 \x03(\x0b\x32\x42.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta\x12\x38\n\x05state\x18\x05 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x07 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x1a\xe7\x01\n\x10IndexConfigDelta\x12\x62\n\x0b\x63hange_type\x18\x01 \x01(\x0e\x32M.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType\x12/\n\x05index\x18\x02 \x01(\x0b\x32 .google.firestore.admin.v1.Index">\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02"\xec\x02\n\x17\x45xportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x19\n\x11output_uri_prefix\x18\x07 \x01(\t"\xeb\x02\n\x17ImportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x18\n\x10input_uri_prefix\x18\x07 \x01(\t"4\n\x17\x45xportDocumentsResponse\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t":\n\x08Progress\x12\x16\n\x0e\x65stimated_work\x18\x01 \x01(\x03\x12\x16\n\x0e\x63ompleted_work\x18\x02 \x01(\x03*\x9e\x01\n\x0eOperationState\x12\x1f\n\x1bOPERATION_STATE_UNSPECIFIED\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x0e\n\nPROCESSING\x10\x02\x12\x0e\n\nCANCELLING\x10\x03\x12\x0e\n\nFINALIZING\x10\x04\x12\x0e\n\nSUCCESSFUL\x10\x05\x12\n\n\x06\x46\x41ILED\x10\x06\x12\r\n\tCANCELLED\x10\x07\x42\xe2\x01\n\x1d\x63om.google.firestore.admin.v1B\x0eOperationProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1\xea\x02#Google::Cloud::Firestore::Admin::V1b\x06proto3',
dependencies=[
google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_OPERATIONSTATE = _descriptor.EnumDescriptor(
name="OperationState",
full_name="google.firestore.admin.v1.OperationState",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="OPERATION_STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="INITIALIZING",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="PROCESSING",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CANCELLING",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FINALIZING",
index=4,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="SUCCESSFUL",
index=5,
number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FAILED",
index=6,
number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CANCELLED",
index=7,
number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2017,
serialized_end=2175,
)
_sym_db.RegisterEnumDescriptor(_OPERATIONSTATE)
OperationState = enum_type_wrapper.EnumTypeWrapper(_OPERATIONSTATE)
OPERATION_STATE_UNSPECIFIED = 0
INITIALIZING = 1
PROCESSING = 2
CANCELLING = 3
FINALIZING = 4
SUCCESSFUL = 5
FAILED = 6
CANCELLED = 7
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE = _descriptor.EnumDescriptor(
name="ChangeType",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="CHANGE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ADD",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="REMOVE",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1105,
serialized_end=1167,
)
_sym_db.RegisterEnumDescriptor(_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE)
_INDEXOPERATIONMETADATA = _descriptor.Descriptor(
name="IndexOperationMetadata",
full_name="google.firestore.admin.v1.IndexOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.IndexOperationMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.IndexOperationMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.firestore.admin.v1.IndexOperationMetadata.index",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.firestore.admin.v1.IndexOperationMetadata.state",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_documents",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_bytes",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=199,
serialized_end=516,
)
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA = _descriptor.Descriptor(
name="IndexConfigDelta",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="change_type",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.change_type",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index",
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.index",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=936,
serialized_end=1167,
)
_FIELDOPERATIONMETADATA = _descriptor.Descriptor(
name="FieldOperationMetadata",
full_name="google.firestore.admin.v1.FieldOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.FieldOperationMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.FieldOperationMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="field",
full_name="google.firestore.admin.v1.FieldOperationMetadata.field",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="index_config_deltas",
full_name="google.firestore.admin.v1.FieldOperationMetadata.index_config_deltas",
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.firestore.admin.v1.FieldOperationMetadata.state",
index=4,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_documents",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_bytes",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=519,
serialized_end=1167,
)
_EXPORTDOCUMENTSMETADATA = _descriptor.Descriptor(
name="ExportDocumentsMetadata",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_state",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.operation_state",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_documents",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_bytes",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="collection_ids",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.collection_ids",
index=5,
number=6,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="output_uri_prefix",
full_name="google.firestore.admin.v1.ExportDocumentsMetadata.output_uri_prefix",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1170,
serialized_end=1534,
)
_IMPORTDOCUMENTSMETADATA = _descriptor.Descriptor(
name="ImportDocumentsMetadata",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.start_time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="end_time",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.end_time",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_state",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.operation_state",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_documents",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_documents",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="progress_bytes",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_bytes",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="collection_ids",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.collection_ids",
index=5,
number=6,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="input_uri_prefix",
full_name="google.firestore.admin.v1.ImportDocumentsMetadata.input_uri_prefix",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1537,
serialized_end=1900,
)
_EXPORTDOCUMENTSRESPONSE = _descriptor.Descriptor(
name="ExportDocumentsResponse",
full_name="google.firestore.admin.v1.ExportDocumentsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="output_uri_prefix",
full_name="google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1902,
serialized_end=1954,
)
_PROGRESS = _descriptor.Descriptor(
name="Progress",
full_name="google.firestore.admin.v1.Progress",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="estimated_work",
full_name="google.firestore.admin.v1.Progress.estimated_work",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="completed_work",
full_name="google.firestore.admin.v1.Progress.completed_work",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1956,
serialized_end=2014,
)
_INDEXOPERATIONMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INDEXOPERATIONMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INDEXOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE
_INDEXOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_INDEXOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[
"change_type"
].enum_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[
"index"
].message_type = (
google_dot_cloud_dot_firestore__admin__v1_dot_proto_dot_index__pb2._INDEX
)
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.containing_type = _FIELDOPERATIONMETADATA
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE.containing_type = (
_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA
)
_FIELDOPERATIONMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_FIELDOPERATIONMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_FIELDOPERATIONMETADATA.fields_by_name[
"index_config_deltas"
].message_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA
_FIELDOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE
_FIELDOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_FIELDOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_EXPORTDOCUMENTSMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EXPORTDOCUMENTSMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EXPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE
_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
_IMPORTDOCUMENTSMETADATA.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_IMPORTDOCUMENTSMETADATA.fields_by_name[
"end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_IMPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE
_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS
_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS
DESCRIPTOR.message_types_by_name["IndexOperationMetadata"] = _INDEXOPERATIONMETADATA
DESCRIPTOR.message_types_by_name["FieldOperationMetadata"] = _FIELDOPERATIONMETADATA
DESCRIPTOR.message_types_by_name["ExportDocumentsMetadata"] = _EXPORTDOCUMENTSMETADATA
DESCRIPTOR.message_types_by_name["ImportDocumentsMetadata"] = _IMPORTDOCUMENTSMETADATA
DESCRIPTOR.message_types_by_name["ExportDocumentsResponse"] = _EXPORTDOCUMENTSRESPONSE
DESCRIPTOR.message_types_by_name["Progress"] = _PROGRESS
DESCRIPTOR.enum_types_by_name["OperationState"] = _OPERATIONSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IndexOperationMetadata = _reflection.GeneratedProtocolMessageType(
"IndexOperationMetadata",
(_message.Message,),
{
"DESCRIPTOR": _INDEXOPERATIONMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreA
dmin.CreateIndex].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
index:
The index resource that this operation is acting on. For
example: ``projects/{project_id}/databases/{database_id}/colle
ctionGroups/{collection_id}/indexes/{index_id}``
state:
The state of the operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.IndexOperationMetadata)
},
)
_sym_db.RegisterMessage(IndexOperationMetadata)
FieldOperationMetadata = _reflection.GeneratedProtocolMessageType(
"FieldOperationMetadata",
(_message.Message,),
{
"IndexConfigDelta": _reflection.GeneratedProtocolMessageType(
"IndexConfigDelta",
(_message.Message,),
{
"DESCRIPTOR": _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Information about an index configuration change.
Attributes:
change_type:
Specifies how the index is changing.
index:
The index being changed.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta)
},
),
"DESCRIPTOR": _FIELDOPERATIONMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreA
dmin.UpdateField].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
field:
The field resource that this operation is acting on. For
example: ``projects/{project_id}/databases/{database_id}/colle
ctionGroups/{collection_id}/fields/{field_path}``
index_config_deltas:
A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOp
erationMetadata.IndexConfigDelta], which describe the intent
of this operation.
state:
The state of the operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata)
},
)
_sym_db.RegisterMessage(FieldOperationMetadata)
_sym_db.RegisterMessage(FieldOperationMetadata.IndexConfigDelta)
ExportDocumentsMetadata = _reflection.GeneratedProtocolMessageType(
"ExportDocumentsMetadata",
(_message.Message,),
{
"DESCRIPTOR": _EXPORTDOCUMENTSMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.Firest
oreAdmin.ExportDocuments].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
operation_state:
The state of the export operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
collection_ids:
Which collection ids are being exported.
output_uri_prefix:
Where the entities are being exported to.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsMetadata)
},
)
_sym_db.RegisterMessage(ExportDocumentsMetadata)
ImportDocumentsMetadata = _reflection.GeneratedProtocolMessageType(
"ImportDocumentsMetadata",
(_message.Message,),
{
"DESCRIPTOR": _IMPORTDOCUMENTSMETADATA,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Metadata for
[google.longrunning.Operation][google.longrunning.Operation] results
from [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.Firest
oreAdmin.ImportDocuments].
Attributes:
start_time:
The time this operation started.
end_time:
The time this operation completed. Will be unset if operation
still in progress.
operation_state:
The state of the import operation.
progress_documents:
The progress, in documents, of this operation.
progress_bytes:
The progress, in bytes, of this operation.
collection_ids:
Which collection ids are being imported.
input_uri_prefix:
The location of the documents being imported.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ImportDocumentsMetadata)
},
)
_sym_db.RegisterMessage(ImportDocumentsMetadata)
ExportDocumentsResponse = _reflection.GeneratedProtocolMessageType(
"ExportDocumentsResponse",
(_message.Message,),
{
"DESCRIPTOR": _EXPORTDOCUMENTSRESPONSE,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Returned in the
[google.longrunning.Operation][google.longrunning.Operation] response
field.
Attributes:
output_uri_prefix:
Location of the output files. This can be used to begin an
import into Cloud Firestore (this project or another project)
after the operation completes successfully.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsResponse)
},
)
_sym_db.RegisterMessage(ExportDocumentsResponse)
Progress = _reflection.GeneratedProtocolMessageType(
"Progress",
(_message.Message,),
{
"DESCRIPTOR": _PROGRESS,
"__module__": "google.cloud.firestore_admin_v1.proto.operation_pb2",
"__doc__": """Describes the progress of the operation. Unit of work is generic and
must be interpreted based on where
[Progress][google.firestore.admin.v1.Progress] is used.
Attributes:
estimated_work:
The amount of work estimated.
completed_work:
The amount of work completed.
""",
# @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Progress)
},
)
_sym_db.RegisterMessage(Progress)
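# Illustrative usage only (a minimal sketch, not emitted by protoc): the
# messages registered above behave like any other generated protobuf message,
# e.g.
#     progress = Progress(estimated_work=100, completed_work=25)
#     payload = progress.SerializeToString()
#     assert Progress.FromString(payload).completed_work == 25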
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.548061
| 3,531
| 0.643289
|
e943b709981fb20d8be243b5853e805ef571940a
| 1,515
|
py
|
Python
|
vice/yields/ccsne/engines/tests/cutoff.py
|
rcooke-ast/VICE
|
762911eb4192c7206ce2ae36b645d120ed889cb7
|
[
"MIT"
] | 22
|
2018-09-26T21:02:51.000Z
|
2022-03-24T18:07:03.000Z
|
vice/yields/ccsne/engines/tests/cutoff.py
|
rcooke-ast/VICE
|
762911eb4192c7206ce2ae36b645d120ed889cb7
|
[
"MIT"
] | 2
|
2019-05-03T13:08:27.000Z
|
2021-02-17T20:11:37.000Z
|
vice/yields/ccsne/engines/tests/cutoff.py
|
rcooke-ast/VICE
|
762911eb4192c7206ce2ae36b645d120ed889cb7
|
[
"MIT"
] | 3
|
2019-05-10T19:26:31.000Z
|
2021-11-10T08:13:42.000Z
|
r"""
This file implements unit testing of the ``cutoff`` derived class.
"""
from __future__ import absolute_import
from ..cutoff import cutoff
from ..._yield_integrator import _MINIMUM_MASS_
from .....testing import moduletest
from .....testing import unittest
import random
@moduletest
def test():
r"""
vice.yields.ccsne.engines.cutoff module test
"""
return ["vice.yields.ccsne.engines.cutoff",
[
test_initialization(),
test_call()
]
]
@unittest
def test_initialization():
r"""
Performs a unit test on the ``cutoff`` constructor.
"""
def test():
try:
test_ = cutoff()
except:
return False
status = isinstance(test_, cutoff)
status &= test_.masses == []
status &= test_.frequencies == []
return status
return ["vice.yields.ccsne.engines.cutoff.__init__", test]
@unittest
def test_call():
r"""
Performs a unit test on the __call__ function.
"""
def test():
try:
test_ = cutoff()
except:
return None
random.seed()
status = True
for i in range(100):
new_threshold = _MINIMUM_MASS_ + (100 -
_MINIMUM_MASS_) * random.random()
try:
test_.collapse_mass = new_threshold
except:
return None
for j in range(10):
test_mass = _MINIMUM_MASS_ + (100 -
_MINIMUM_MASS_) * random.random()
try:
status &= test_(test_mass) == float(
test_mass <= new_threshold)
except:
return False
if not status: break
if not status: break
return status
return ["vice.yields.ccsne.engines.cutoff.__call__", test]
| 19.934211
| 66
| 0.675248
|
6b679cb9db41d1a3c4e816f655519546e43de3dc
| 4,129
|
py
|
Python
|
environments/utils.py
|
bunthet01/robotics-rl-srl
|
21bed859ca821844b2e1a90f4786675c9e1dd151
|
[
"MIT"
] | 5
|
2019-08-21T22:57:21.000Z
|
2021-01-01T21:15:26.000Z
|
environments/utils.py
|
bunthet01/robotics-rl-srl
|
21bed859ca821844b2e1a90f4786675c9e1dd151
|
[
"MIT"
] | null | null | null |
environments/utils.py
|
bunthet01/robotics-rl-srl
|
21bed859ca821844b2e1a90f4786675c9e1dd151
|
[
"MIT"
] | null | null | null |
# Modified version of https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/envs.py
import importlib
import os
from gym.envs.registration import registry, patch_deprecated_methods, load
from stable_baselines import bench
import numpy as np
def dynamicEnvLoad(env_id):
"""
Get from Gym, the module where the environment is stored
:param env_id: (str)
:return: (module, str, str) module_env, class_name, env_module_path
"""
# Get from the env_id, the entry_point, and distinguish if it is a callable, or a string
entry_point = registry.spec(env_id)._entry_point
if callable(entry_point):
class_name = entry_point.__name__
env_module_path = entry_point.__module__
else:
class_name = entry_point.split(':')[1]
env_module_path = entry_point.split(':')[0]
    # Let's try to dynamically load the module_env, in order to fetch the globals.
    # If it fails, it means that it was unable to load the path from the entry_point;
    # should this occur, some parameters will not be correctly saved.
try:
module_env = importlib.import_module(env_module_path)
except ImportError:
raise AssertionError("Error: could not import module {}, ".format(env_module_path) +
"Halting execution. Are you sure this is a valid environement?")
return module_env, class_name, env_module_path
def makeEnv(env_id, seed, rank, log_dir, allow_early_resets=False, env_kwargs=None):
"""
Instantiate gym env
:param env_id: (str)
:param seed: (int)
:param rank: (int)
:param log_dir: (str)
    :param allow_early_resets: (bool) Allow reset before the environment is done
:param env_kwargs: (dict) The extra arguments for the environment
"""
# define a place holder function to be returned to the caller.
def _thunk():
        local_env_kwargs = dict(env_kwargs) if env_kwargs is not None else {}  # copy to avoid mutating the caller's dict (env_kwargs may be None)
local_env_kwargs["env_rank"] = rank
env = _make(env_id, env_kwargs=local_env_kwargs)
env.seed(seed + rank)
if log_dir is not None:
env = bench.Monitor(env, os.path.join(log_dir, str(rank)), allow_early_resets=allow_early_resets)
return env
return _thunk
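# Illustrative usage only (a minimal sketch; the env id and log dir below are
# placeholders, not defined in this module):
#     env_fns = [makeEnv("SomeRegisteredEnv-v0", seed=0, rank=i,
#                        log_dir="/tmp/logs", env_kwargs={})
#                for i in range(4)]
#     # each element is a thunk; calling it builds and seeds one monitored env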
def _make(id_, env_kwargs=None):
"""
Recreating the gym make function from gym/envs/registration.py
as such as it can support extra arguments for the environment
:param id_: (str) The environment ID
:param env_kwargs: (dict) The extra arguments for the environment
"""
if env_kwargs is None:
env_kwargs = {}
# getting the spec from the ID we want
spec = registry.spec(id_)
# Keeping the checks and safe guards of the old code
assert spec._entry_point is not None, 'Attempting to make deprecated env {}. ' \
                                           '(HINT: is there a newer registered version of this env?)'.format(spec.id)
if callable(spec._entry_point):
env = spec._entry_point(**env_kwargs)
else:
cls = load(spec._entry_point)
# create the env, with the original kwargs, and the new ones overriding them if needed
env = cls(**{**spec._kwargs, **env_kwargs})
    # Make the environment aware of which spec it came from.
env.unwrapped.spec = spec
# Keeping the old patching system for _reset, _step and timestep limit
if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
patch_deprecated_methods(env)
if (env.spec.timestep_limit is not None) and not spec.tags.get('vnc'):
from gym.wrappers.time_limit import TimeLimit
env = TimeLimit(env,
max_episode_steps=env.spec.max_episode_steps,
max_episode_seconds=env.spec.max_episode_seconds)
return env
def compareAction(x, y):
"""
    :param x: (iterable) first sequence of actions
    :param y: (iterable) second sequence of actions
    :return: (float) fraction of the actions in x that also appear in y
"""
assert (len(x) == len(y)), "Both array must have the same length."
shared_actions = [i for i in x if i in y]
result = len(shared_actions)/len(x)
return result
| 37.198198
| 118
| 0.673771
|
a1108f39ed20b25988ff4b5d459e225d06c4ada8
| 548
|
py
|
Python
|
deepstar/filesystem/frame_set_dir.py
|
zerofox-oss/deepstar
|
fe0fe12317975104fa6ff6c058d141f11e6e951d
|
[
"BSD-3-Clause-Clear"
] | 44
|
2019-08-09T16:14:27.000Z
|
2022-02-10T06:54:35.000Z
|
deepstar/filesystem/frame_set_dir.py
|
zerofox-oss/deepstar
|
fe0fe12317975104fa6ff6c058d141f11e6e951d
|
[
"BSD-3-Clause-Clear"
] | 2
|
2020-09-26T00:05:52.000Z
|
2021-03-22T13:27:36.000Z
|
deepstar/filesystem/frame_set_dir.py
|
zerofox-oss/deepstar
|
fe0fe12317975104fa6ff6c058d141f11e6e951d
|
[
"BSD-3-Clause-Clear"
] | 14
|
2019-08-19T16:47:32.000Z
|
2022-03-04T03:57:27.000Z
|
import os
from deepstar.filesystem.file_dir import FileDir
class FrameSetDir:
"""
This class implements the FrameSetDir class.
"""
@classmethod
def path(cls):
"""
This method returns the path to the frame set directory.
:rtype: str
"""
return os.path.join(FileDir.path(), 'frame_sets')
@classmethod
def init(cls):
"""
This method initializes the frame set directory.
:rtype: None
"""
os.makedirs(FrameSetDir.path(), exist_ok=True)
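# Illustrative usage only (a minimal sketch, not part of the original module):
#     FrameSetDir.init()                   # creates <FileDir.path()>/frame_sets
#     frame_sets_root = FrameSetDir.path()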
| 18.266667
| 64
| 0.593066
|
a6a24ec83324805a35beccda44bf6b2b4c8dae2f
| 11,223
|
py
|
Python
|
experiments/multi_task_preprocess.py
|
vishalbelsare/SocialMediaIE
|
4ecbb4c7082b06454c92e552e4ad3af14a4f38b2
|
[
"Apache-2.0"
] | 14
|
2019-11-04T05:42:56.000Z
|
2021-10-05T08:21:53.000Z
|
experiments/multi_task_preprocess.py
|
vishalbelsare/SocialMediaIE
|
4ecbb4c7082b06454c92e552e4ad3af14a4f38b2
|
[
"Apache-2.0"
] | 6
|
2021-09-08T02:20:11.000Z
|
2022-03-12T00:41:22.000Z
|
experiments/multi_task_preprocess.py
|
vishalbelsare/SocialMediaIE
|
4ecbb4c7082b06454c92e552e4ad3af14a4f38b2
|
[
"Apache-2.0"
] | 2
|
2020-06-17T09:33:50.000Z
|
2021-02-26T00:38:01.000Z
|
# coding: utf-8
# In[1]:
from glob import glob
from collections import Counter
import pandas as pd
from IPython.display import display
# In[2]:
NER_FILES={
"Finin": {
"train": "/datadrive/Datasets/lowlands-data/LREC2014/twitter_ner/data/finin.train.tsv",
"test": "/datadrive/Datasets/lowlands-data/LREC2014/twitter_ner/data/finin.test.tsv.utf8",
},
"Hege": {
"test": "/datadrive/Datasets/lowlands-data/LREC2014/twitter_ner/data/hege.test.tsv",
},
"Ritter": {
"test": "/datadrive/Datasets/lowlands-data/LREC2014/twitter_ner/data/ritter.test.tsv",
},
"WNUT_2016": {
"train": "/datadrive/Codes/multi-task-nlp-keras/data/WNUT_NER/train.tsv",
"test": "/datadrive/Codes/multi-task-nlp-keras/data/WNUT_NER/test.tsv",
"dev": "/datadrive/Codes/multi-task-nlp-keras/data/WNUT_NER/dev.tsv",
},
"WNUT_2017": {
"train": "/datadrive/Codes/multi-task-nlp-keras/data/WNUT_2017/wnut17train.conll",
"dev": "/datadrive/Codes/multi-task-nlp-keras/data/WNUT_2017/emerging.dev.conll",
"test": "/datadrive/Codes/multi-task-nlp-keras/data/WNUT_2017/emerging.test.annotated",
},
"MSM_2013": {
"train": "/datadrive/Datasets/Twitter/MSM2013/data/msm2013-ce_challenge_gs/TweetsTrainingSetCH.tsv.conll",
"test": "/datadrive/Datasets/Twitter/MSM2013/data/msm2013-ce_challenge_gs/goldStandard.tsv.conll",
}
}
POS_FILES={
"Owoputi_2013": {
"train": "/datadrive/Datasets/Twitter/TweeboParser/ark-tweet-nlp-0.3.2/data/twpos-data-v0.3/oct27.splits/oct27.train",
"traindev": "/datadrive/Datasets/Twitter/TweeboParser/ark-tweet-nlp-0.3.2/data/twpos-data-v0.3/oct27.splits/oct27.traindev",
"dev": "/datadrive/Datasets/Twitter/TweeboParser/ark-tweet-nlp-0.3.2/data/twpos-data-v0.3/oct27.splits/oct27.dev",
"test": "/datadrive/Datasets/Twitter/TweeboParser/ark-tweet-nlp-0.3.2/data/twpos-data-v0.3/oct27.splits/oct27.test",
"daily547": "/datadrive/Datasets/Twitter/TweeboParser/ark-tweet-nlp-0.3.2/data/twpos-data-v0.3/daily547.conll"
},
"LexNorm_Li_2015": {
"dev": "/datadrive/Datasets/Twitter/wnut-2017-pos-norm/data/test_L.gold"
},
## Next 3 use Universal POS mappings:
"Foster_2011": {
"test": "/datadrive/Datasets/lowlands-data/ACL2014/crowdsourced_POS/data/foster-twitter.test"
},
"Ritter_2011": {
"test": "/datadrive/Datasets/lowlands-data/ACL2014/crowdsourced_POS/data/ritter.test"
},
"lowlands": {
"test": "/datadrive/Datasets/lowlands-data/ACL2014/crowdsourced_POS/data/lowlands.test"
},
"Gimple_2012": {
"test": "/datadrive/Datasets/lowlands-data/ACL2014/crowdsourced_POS/data/gimpel.GOLD"
},
"Bootstrap_2013": {
# Full PTB tagset, plus four custom tags (USR, HT, RT, URL)
"train": "/datadrive/Datasets/Twitter/twitter-pos-bootstrap/data/bootstrap.conll"
}
}
SENTIMENT_FILES={
"SMILE": {
"train": "/datadrive/Datasets/Twitter/SMILE/smile-annotations-final.csv",
},
}
SUPERSENSE_TAGGING_FILES={
"Ritter": {
"train": "/datadrive/Datasets/Twitter/supersense-data-twitter/ritter-train.tsv",
"dev": "/datadrive/Datasets/Twitter/supersense-data-twitter/ritter-dev.tsv",
"test": "/datadrive/Datasets/Twitter/supersense-data-twitter/ritter-eval.tsv"
},
"Johannsen_2014": {
"test": "/datadrive/Datasets/Twitter/supersense-data-twitter/in-house-eval.tsv"
}
}
FRAME_SEMANTICS_FILE={
"Sogaard_2015": {
"gavin": "/datadrive/Datasets/lowlands-data/AAAI15/conll/all.gavin",
"maria": "/datadrive/Datasets/lowlands-data/AAAI15/conll/all.maria",
"sara": "/datadrive/Datasets/lowlands-data/AAAI15/conll/all.sara"
}
}
DIMSUM_FILES = {
# Following data is already part of dimsum
#"Lowlands": {
# "test": "/datadrive/Datasets/Twitter/dimsum-data/conversion/original/lowlands.UPOS2.tsv"
#},
#"Ritter": {
# "test": "/datadrive/Datasets/Twitter/dimsum-data/conversion/original/ritter.UPOS2.tsv"
#},
#"Streusle": {
# "test": "/datadrive/Datasets/Twitter/dimsum-data/conversion/original/streusle.upos.tags"
#},
"DiMSUM_2016": {
# Made in combination with ritter, streusle, lowlands
# 55579 ewtb
# 3062 lowlands
# 15185 ritter
"train": "/datadrive/Datasets/Twitter/dimsum-data/conll/dimsum16.train",
# 3516 ted
# 6357 trustpilot
# 6627 tweebank
"test": "/datadrive/Datasets/Twitter/dimsum-data/conll/dimsum16.test"
}
}
PARSING_FILES={
"Kong_2014": {
"train": "/datadrive/Datasets/Twitter/TweeboParser/Tweebank/Train_Test_Splited/train",
"test": "/datadrive/Datasets/Twitter/TweeboParser/Tweebank/Train_Test_Splited/test",
}
}
WEB_TREEBANK={
"DenoisedWebTreebank": {
"dev": "/datadrive/Datasets/Twitter/DenoisedWebTreebank/data/DenoisedWebTreebank/dev.conll",
"test": "/datadrive/Datasets/Twitter/DenoisedWebTreebank/data/DenoisedWebTreebank/test.conll"
}
}
NORMALIZED={
"DenoisedWebTreebank": {
"dev": "/datadrive/Datasets/Twitter/DenoisedWebTreebank/data/DenoisedWebTreebank/dev.normalized",
"test": "/datadrive/Datasets/Twitter/DenoisedWebTreebank/data/DenoisedWebTreebank/test.normalized"
}
}
PARAPHRASE_SEMANTIC_FILES={
"SemEval-2015 Task 1": {
# Topic_Id | Topic_Name | Sent_1 | Sent_2 | Label | Sent_1_tag | Sent_2_tag |
# Map labels as follows
# paraphrases: (3, 2) (4, 1) (5, 0)
# non-paraphrases: (1, 4) (0, 5)
        # debatable: (2, 3), which you may discard if training a binary classifier (see the sketch after this dict)
"train": "/datadrive/Datasets/Twitter/SemEval-PIT2015-github/data/train.data",
"dev": "/datadrive/Datasets/Twitter/SemEval-PIT2015-github/data/dev.data",
"test": "/datadrive/Datasets/Twitter/SemEval-PIT2015-github/data/test.data"
}
}
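# Illustrative only: one possible way to binarize the "(3, 2)"-style vote
# counts described above (the exact Label-column format is an assumption here).
def pit2015_binary_label(label):
    """Map PIT-2015 vote counts to 1 (paraphrase), 0 (non-paraphrase) or None (debatable)."""
    yes_votes = int(label.strip("()").split(",")[0])
    if yes_votes >= 3:
        return 1    # (3, 2), (4, 1), (5, 0)
    if yes_votes <= 1:
        return 0    # (1, 4), (0, 5)
    return None     # (2, 3): often discarded when training a binary classifier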
# In[3]:
def read_conll_data(filename, ncols=2):
with open(filename, encoding='utf-8') as fp:
for seq in fp.read().split("\n\n"):
seq_ = []
for line in seq.splitlines():
line = line.rstrip()
if not line:
continue
values = line.split("\t")
if len(values) < ncols:
# Skip invalid lines
continue
seq_.append(values)
if not seq_:
seq_ = []
continue
yield seq_
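# Illustrative usage only (the path is a placeholder): each yielded item is a
# list of rows, one row per token, so for a two-column NER file
#     for seq in read_conll_data("/path/to/some.tsv", ncols=2):
#         tokens = [row[0] for row in seq]
#         labels = [row[-1] for row in seq]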
# In[4]:
def get_ner_label(label, idx=1):
if label.upper() == "O":
return label
if idx is None:
return label
return label.split('-', 1)[idx]
def get_simple_label(label):
if label:
return label
return "O"
def get_file_stats(
filename,
label_processor=None,
label_col_id=-1,
skip_other=True,
ncols=2
):
if label_processor is None:
label_processor = lambda x: x
total_seq = 0
total_tokens = 0
token_types = Counter()
for i, seq in enumerate(read_conll_data(filename, ncols=ncols)):
total_seq += 1
total_tokens += len(seq)
try:
for item in seq:
label = label_processor(item[label_col_id])
if skip_other and label == "O":
continue
token_types.update([
label
])
except IndexError:
print(i, seq)
raise
return total_seq, total_tokens, token_types
# In[5]:
def make_conll_dataset_tables(files, **kwargs):
all_stats = []
for datakey in files:
for datatype, filepath in files[datakey].items():
print("{}-{}: {}".format(datakey, datatype, filepath))
total_seq, total_tokens, token_types = get_file_stats(filepath, **kwargs)
print(total_seq, total_tokens, token_types)
all_stats.append((datakey, datatype, total_seq, total_tokens, token_types))
return all_stats
def generate_tables(files, display_df=False, show_labels=True, **kwargs):
all_stats = make_conll_dataset_tables(files, **kwargs)
df = pd.DataFrame(all_stats, columns=[
"datakey", "datatype", "total_seq", "total_tokens", "labels"])
if show_labels:
df = df.assign(
all_labels=df["labels"].apply(lambda x: (", ".join(sorted(x.keys()))).upper())
)
df = df.assign(
num_labels=df["labels"].apply(len),
).sort_values(["datakey", "datatype"])
if display_df:
display(df)
with pd.option_context("display.max_colwidth", -1):
print(df.drop("labels", 1).set_index(["datakey", "datatype"]).to_latex())
display(df.drop("labels", 1).set_index(["datakey", "datatype"]))
# In[6]:
generate_tables(NER_FILES, display_df=True, label_processor=lambda x: get_ner_label(x, idx=1))
# ## POS datasets
# In[7]:
generate_tables(POS_FILES, display_df=False)
# ## Supersense tagging
# In[8]:
generate_tables(SUPERSENSE_TAGGING_FILES, display_df=False)
# In[9]:
generate_tables(SUPERSENSE_TAGGING_FILES, label_processor=lambda x: get_ner_label(x, idx=1))
# ## DimSUM
#
# https://dimsum16.github.io/
# In[10]:
generate_tables(DIMSUM_FILES, label_col_id=7, label_processor=get_simple_label, skip_other=True)
# ## Frame Semantics
#
#
#
# ```
# @paper{AAAI159349,
# author = {Anders Søgaard and Barbara Plank and Hector Alonso},
# title = {Using Frame Semantics for Knowledge Extraction from Twitter},
# conference = {AAAI Conference on Artificial Intelligence},
# year = {2015},
# keywords = {frame semantics; knowledge bases; twitter},
# abstract = {Knowledge bases have the potential to advance artificial intelligence, but often suffer from recall problems, i.e., lack of knowledge of new entities and relations. On the contrary, social media such as Twitter provide abundance of data, in a timely manner: information spreads at an incredible pace and is posted long before it makes it into more commonly used resources for knowledge extraction. In this paper we address the question whether we can exploit social media to extract new facts, which may at first seem like finding needles in haystacks. We collect tweets about 60 entities in Freebase and compare four methods to extract binary relation candidates, based on syntactic and semantic parsing and simple mechanism for factuality scoring. The extracted facts are manually evaluated in terms of their correctness and relevance for search. We show that moving from bottom-up syntactic or semantic dependency parsing formalisms to top-down frame-semantic processing improves the robustness of knowledge extraction, producing more intelligible fact candidates of better quality. In order to evaluate the quality of frame semantic parsing on Twitter intrinsically, we make a multiply frame-annotated dataset of tweets publicly available.},
#
# url = {https://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9349}
# }
#
# ```
# In[11]:
generate_tables(FRAME_SEMANTICS_FILE, show_labels=False, label_col_id=3, label_processor=get_simple_label, skip_other=True)
# In[ ]:
| 35.515823
| 1,260
| 0.661766
|
3d24205ad567517f51acf25ee15983d096432eb0
| 660
|
py
|
Python
|
neighbourhood/forms.py
|
amwaniki180/the_hood
|
12b7201101422d09fc166a964358778f76ed8f27
|
[
"MIT"
] | null | null | null |
neighbourhood/forms.py
|
amwaniki180/the_hood
|
12b7201101422d09fc166a964358778f76ed8f27
|
[
"MIT"
] | 5
|
2020-06-05T22:19:37.000Z
|
2021-09-08T01:13:25.000Z
|
neighbourhood/forms.py
|
amwaniki180/the_hood
|
12b7201101422d09fc166a964358778f76ed8f27
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.models import User
from django.core import validators
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from .models import Profile,Business,Project,Post
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude=['user']
class BusinessForm(forms.ModelForm):
class Meta:
model = Business
exclude = ['owner']
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
exclude=['admin']
class PostForm(forms.ModelForm):
class Meta:
model = Post
exclude=['poster']
| 23.571429
| 73
| 0.672727
|
85ae03ce6c642c4e5870a176c2b754f0f0d830b9
| 1,914
|
py
|
Python
|
app.py
|
oslokommune/okdata-permission-api
|
e12739794fb48437c1b39e1bffc9d632e76d2449
|
[
"MIT"
] | null | null | null |
app.py
|
oslokommune/okdata-permission-api
|
e12739794fb48437c1b39e1bffc9d632e76d2449
|
[
"MIT"
] | 1
|
2022-01-14T09:37:58.000Z
|
2022-01-14T09:37:58.000Z
|
app.py
|
oslokommune/okdata-permission-api
|
e12739794fb48437c1b39e1bffc9d632e76d2449
|
[
"MIT"
] | null | null | null |
import os
from fastapi import FastAPI, Request
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from okdata.aws.logging import add_fastapi_logging
from pydantic import ValidationError
from resources import (
permissions,
my_permissions,
webhook_tokens,
remove_team_permissions,
)
from resources.errors import ErrorResponse
root_path = os.environ.get("ROOT_PATH", "")
app = FastAPI(
title="Okdata Permission API",
description="API for managing permissions to okdata resources such as datasets",
version="0.1.0",
root_path=root_path,
)
add_fastapi_logging(app)
app.include_router(
permissions.router,
prefix="/permissions",
tags=["permissions"],
)
app.include_router(
my_permissions.router,
prefix="/my_permissions",
tags=["permissions"],
)
# This endpoint is part of a workaround for a bug in keycloak: https://confluence.oslo.kommune.no/pages/viewpage.action?pageId=162566147
app.include_router(
remove_team_permissions.router,
prefix="/remove_team_permissions",
tags=["remove_team_permissions"],
)
app.include_router(webhook_tokens.router, prefix="/webhooks", tags=["webhooks"])
@app.exception_handler(ErrorResponse)
def abort_exception_handler(request: Request, exc: ErrorResponse):
return JSONResponse(status_code=exc.status_code, content={"message": exc.message})
@app.exception_handler(RequestValidationError)
@app.exception_handler(ValidationError)
def abort_validation_error(request: Request, exc):
errors = exc.errors()
    # Exclude Python-specific details from the error payload,
# e.g. 'ctx': {'enum_values': [<WebhookTokenOperation.READ: 'read'>, <WebhookTokenOperation.WRITE: 'write'>]}
for error in errors:
error.pop("ctx", None)
error.pop("type", None)
return JSONResponse(
status_code=400,
content={"message": "Bad Request", "errors": errors},
)
| 28.567164
| 136
| 0.741379
|
168090acf8c05bbf412a703c5d8e9636e45aec75
| 3,725
|
py
|
Python
|
posthog/request.py
|
mands/posthog-python
|
cc5649368f520a46fdd228d316a807f8efbb5855
|
[
"MIT"
] | 12
|
2020-03-24T13:34:49.000Z
|
2022-02-27T00:51:14.000Z
|
posthog/request.py
|
mands/posthog-python
|
cc5649368f520a46fdd228d316a807f8efbb5855
|
[
"MIT"
] | 28
|
2020-02-19T02:19:32.000Z
|
2022-02-28T09:15:38.000Z
|
posthog/request.py
|
mands/posthog-python
|
cc5649368f520a46fdd228d316a807f8efbb5855
|
[
"MIT"
] | 9
|
2020-02-18T21:09:14.000Z
|
2022-03-19T17:31:05.000Z
|
import json
import logging
from datetime import date, datetime
from gzip import GzipFile
from io import BytesIO
from typing import Any, Optional, Union
import requests
from dateutil.tz import tzutc
from posthog.utils import remove_trailing_slash
from posthog.version import VERSION
_session = requests.sessions.Session()
DEFAULT_HOST = "https://app.posthog.com"
USER_AGENT = "posthog-python/" + VERSION
def post(
api_key: str, host: Optional[str] = None, path=None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the API"""
log = logging.getLogger("posthog")
body = kwargs
body["sentAt"] = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
url = remove_trailing_slash(host or DEFAULT_HOST) + path
body["api_key"] = api_key
data = json.dumps(body, cls=DatetimeSerializer)
log.debug("making request: %s", data)
headers = {"Content-Type": "application/json", "User-Agent": USER_AGENT}
if gzip:
headers["Content-Encoding"] = "gzip"
buf = BytesIO()
with GzipFile(fileobj=buf, mode="w") as gz:
# 'data' was produced by json.dumps(),
# whose default encoding is utf-8.
gz.write(data.encode("utf-8"))
data = buf.getvalue()
res = _session.post(url, data=data, headers=headers, timeout=timeout)
if res.status_code == 200:
log.debug("data uploaded successfully")
return res
def _process_response(
res: requests.Response, success_message: str, *, return_json: bool = True
) -> Union[requests.Response, Any]:
log = logging.getLogger("posthog")
if not res:
raise APIError(
"N/A",
"Error when fetching PostHog API, please make sure you are using your public project token/key and not a private API key.",
)
if res.status_code == 200:
log.debug(success_message)
return res.json() if return_json else res
try:
payload = res.json()
log.debug("received response: %s", payload)
raise APIError(res.status_code, payload["detail"])
except ValueError:
raise APIError(res.status_code, res.text)
def decide(api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs) -> Any:
"""Post the `kwargs to the decide API endpoint"""
res = post(api_key, host, "/decide/", gzip, timeout, **kwargs)
return _process_response(res, success_message="Feature flags decided successfully")
def batch_post(
api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs
) -> requests.Response:
"""Post the `kwargs` to the batch API endpoint for events"""
res = post(api_key, host, "/batch/", gzip, timeout, **kwargs)
return _process_response(res, success_message="data uploaded successfully", return_json=False)
def get(api_key: str, url: str, host: Optional[str] = None, timeout: Optional[int] = None) -> requests.Response:
url = remove_trailing_slash(host or DEFAULT_HOST) + url
res = requests.get(url, headers={"Authorization": "Bearer %s" % api_key, "User-Agent": USER_AGENT}, timeout=timeout)
return _process_response(res, success_message=f"GET {url} completed successfully")
class APIError(Exception):
def __init__(self, status: Union[int, str], message: str):
self.message = message
self.status = status
def __str__(self):
msg = "[PostHog] {0} ({1})"
return msg.format(self.message, self.status)
class DatetimeSerializer(json.JSONEncoder):
def default(self, obj: Any):
if isinstance(obj, (date, datetime)):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
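# Illustrative only (a minimal sketch): DatetimeSerializer is what lets the
# request bodies above carry date/datetime values, e.g.
#     json.dumps({"sentAt": datetime(2021, 1, 1)}, cls=DatetimeSerializer)
#     # -> '{"sentAt": "2021-01-01T00:00:00"}'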
| 35.47619
| 135
| 0.668993
|
014fda53fd58a4e17ea47c09e4ec99af575b23ea
| 657
|
py
|
Python
|
AthleticTeam/SponsorsApp/migrations/0001_initial.py
|
Temeteron/Athletic-Team
|
cd4407ac2a7dd543d1120f5a55908fbe2e49c263
|
[
"MIT"
] | 1
|
2017-04-26T00:08:20.000Z
|
2017-04-26T00:08:20.000Z
|
AthleticTeam/SponsorsApp/migrations/0001_initial.py
|
Temeteron/Athletic-Team
|
cd4407ac2a7dd543d1120f5a55908fbe2e49c263
|
[
"MIT"
] | null | null | null |
AthleticTeam/SponsorsApp/migrations/0001_initial.py
|
Temeteron/Athletic-Team
|
cd4407ac2a7dd543d1120f5a55908fbe2e49c263
|
[
"MIT"
] | 2
|
2016-05-31T21:14:32.000Z
|
2021-09-14T18:59:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Sponsor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(default='photos/index.png', upload_to='photos/', blank=True)),
('name', models.CharField(max_length=30, blank=True)),
('info', models.TextField(blank=True)),
],
),
]
| 28.565217
| 114
| 0.582953
|
efa6832d4f1506fa48f3ed4e34e71770cb91f6a5
| 4,087
|
py
|
Python
|
meerk40t/core/node/elem_ellipse.py
|
dpticar/meerk40t
|
63c4dea8cd1c71bbf8cdf4cb3090f106cdcf0e29
|
[
"MIT"
] | null | null | null |
meerk40t/core/node/elem_ellipse.py
|
dpticar/meerk40t
|
63c4dea8cd1c71bbf8cdf4cb3090f106cdcf0e29
|
[
"MIT"
] | null | null | null |
meerk40t/core/node/elem_ellipse.py
|
dpticar/meerk40t
|
63c4dea8cd1c71bbf8cdf4cb3090f106cdcf0e29
|
[
"MIT"
] | null | null | null |
from copy import copy
from meerk40t.core.node.node import Node
from meerk40t.svgelements import Path
class EllipseNode(Node):
"""
EllipseNode is the bootstrapped node type for the 'elem ellipse' type.
"""
def __init__(
self, shape, matrix=None, fill=None, stroke=None, stroke_width=None, **kwargs
):
super(EllipseNode, self).__init__(type="elem ellipse", **kwargs)
self.shape = shape
self.settings = kwargs
if matrix is None:
self.matrix = shape.transform
else:
self.matrix = matrix
if fill is None:
self.fill = shape.fill
else:
self.fill = fill
if stroke is None:
self.stroke = shape.stroke
else:
self.stroke = stroke
if stroke_width is None:
self.stroke_width = shape.stroke_width
else:
self.stroke_width = stroke_width
self.lock = False
def __repr__(self):
return "%s('%s', %s, %s)" % (
self.__class__.__name__,
self.type,
str(self.shape),
str(self._parent),
)
def __copy__(self):
return EllipseNode(
shape=copy(self.shape),
matrix=copy(self.matrix),
fill=copy(self.fill),
stroke=copy(self.stroke),
stroke_width=copy(self.stroke_width),
**self.settings,
)
@property
def bounds(self):
if self._bounds_dirty:
self.shape.transform = self.matrix
self.shape.stroke_width = self.stroke_width
self._bounds = self.shape.bbox(with_stroke=True)
return self._bounds
def preprocess(self, context, matrix, commands):
self.matrix *= matrix
self.shape.transform = self.matrix
self.shape.stroke_width = self.stroke_width
self._bounds_dirty = True
def default_map(self, default_map=None):
default_map = super(EllipseNode, self).default_map(default_map=default_map)
default_map["element_type"] = "Ellipse"
default_map.update(self.settings)
default_map["stroke"] = self.stroke
default_map["fill"] = self.fill
default_map["stroke-width"] = self.stroke_width
default_map["matrix"] = self.matrix
return default_map
def drop(self, drag_node):
# Dragging element into element.
if drag_node.type.startswith("elem"):
self.insert_sibling(drag_node)
return True
return False
def revalidate_points(self):
bounds = self.bounds
if bounds is None:
return
if len(self._points) < 9:
self._points.extend([None] * (9 - len(self._points)))
self._points[0] = [bounds[0], bounds[1], "bounds top_left"]
self._points[1] = [bounds[2], bounds[1], "bounds top_right"]
self._points[2] = [bounds[0], bounds[3], "bounds bottom_left"]
self._points[3] = [bounds[2], bounds[3], "bounds bottom_right"]
cx = (bounds[0] + bounds[2]) / 2
cy = (bounds[1] + bounds[3]) / 2
self._points[4] = [cx, cy, "bounds center_center"]
self._points[5] = [cx, bounds[1], "bounds top_center"]
self._points[6] = [cx, bounds[3], "bounds bottom_center"]
self._points[7] = [bounds[0], cy, "bounds center_left"]
self._points[8] = [bounds[2], cy, "bounds center_right"]
obj = self.shape
if hasattr(obj, "point"):
if len(self._points) <= 11:
self._points.extend([None] * (11 - len(self._points)))
start = obj.point(0)
end = obj.point(1)
self._points[9] = [start[0], start[1], "endpoint"]
self._points[10] = [end[0], end[1], "endpoint"]
def update_point(self, index, point):
return False
def add_point(self, point, index=None):
return False
def as_path(self):
self.shape.transform = self.matrix
self.shape.stroke_width = self.stroke_width
return abs(Path(self.shape))
| 33.77686
| 85
| 0.579398
|
20a0bd1433c87fa7fe99cdbf245447227f208157
| 3,091
|
py
|
Python
|
pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/emplike/elregress.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2016-09-02T20:31:32.000Z
|
2016-09-02T20:31:32.000Z
|
pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/emplike/elregress.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/emplike/elregress.py
|
wangyum/anaconda
|
6e5a0dbead3327661d73a61e85414cf92aa52be6
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-05-02T10:50:15.000Z
|
2021-05-02T10:50:15.000Z
|
"""
Empirical Likelihood Linear Regression Inference
The script contains the function that is optimized over nuisance parameters to
conduct inference on linear regression parameters. It is called by eltest
in OLSResults.
General References
------------------
Owen, A.B.(2001). Empirical Likelihood. Chapman and Hall
"""
import numpy as np
from statsmodels.emplike.descriptive import _OptFuncts
class _ELRegOpts(_OptFuncts):
"""
A class that holds functions to be optimized over when conducting
hypothesis tests and calculating confidence intervals.
Parameters
----------
OLSResults : Results instance
A fitted OLS result
"""
def __init__(self):
pass
def _opt_nuis_regress(self, nuisance_params, param_nums=None,
endog=None, exog=None,
nobs=None, nvar=None, params=None, b0_vals=None,
stochastic_exog=None):
"""
A function that is optimized over nuisance parameters to conduct a
hypothesis test for the parameters of interest
Parameters
----------
nuisance_params: 1darray
Parameters to be optimized over
Returns
-------
llr : float
-2 x the log-likelihood of the nuisance parameters and the
hypothesized value of the parameter(s) of interest.
"""
params[param_nums] = b0_vals
nuis_param_index = np.int_(np.delete(np.arange(nvar),
param_nums))
params[nuis_param_index] = nuisance_params
new_params = params.reshape(nvar, 1)
self.new_params = new_params
est_vect = exog * \
(endog - np.squeeze(np.dot(exog, new_params))).reshape(nobs, 1)
if not stochastic_exog:
exog_means = np.mean(exog, axis=0)[1:]
exog_mom2 = (np.sum(exog * exog, axis=0))[1:]\
/ nobs
mean_est_vect = exog[:, 1:] - exog_means
mom2_est_vect = (exog * exog)[:, 1:] - exog_mom2
regressor_est_vect = np.concatenate((mean_est_vect, mom2_est_vect),
axis=1)
est_vect = np.concatenate((est_vect, regressor_est_vect),
axis=1)
wts = np.ones(nobs) * (1. / nobs)
x0 = np.zeros(est_vect.shape[1]).reshape(-1, 1)
try:
eta_star = self._modif_newton(x0, est_vect, wts)
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
# the following commented out code is to verify weights
# see open issue #1845
#self.new_weights /= self.new_weights.sum()
#if not np.allclose(self.new_weights.sum(), 1., rtol=0, atol=1e-10):
# raise RuntimeError('weights do not sum to 1')
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
except np.linalg.linalg.LinAlgError:
return np.inf
| 33.597826
| 80
| 0.572307
|
fe4f9a07c11c716383e8794c8172848d872508fe
| 229
|
py
|
Python
|
laguinho/errors/handlers.py
|
MarioHdpz/laguinho-api
|
36ad947e1efe98b0842889d2baf947ed349234b4
|
[
"MIT"
] | 27
|
2019-02-09T04:25:23.000Z
|
2020-05-22T18:34:06.000Z
|
laguinho/errors/handlers.py
|
MarioHdpz/laguinho-api
|
36ad947e1efe98b0842889d2baf947ed349234b4
|
[
"MIT"
] | 51
|
2019-02-15T03:17:53.000Z
|
2021-05-10T23:44:40.000Z
|
laguinho/errors/handlers.py
|
RonnanSouza/laguinho-api
|
cef9ee0c4794597796607b47b72c8a4ae009ca0f
|
[
"MIT"
] | 28
|
2019-02-15T03:16:03.000Z
|
2020-11-25T15:12:41.000Z
|
from flask import Blueprint, jsonify
from marshmallow import ValidationError
errors = Blueprint('errors', __name__)
@errors.app_errorhandler(ValidationError)
def handle_validation_error(e):
return jsonify(e.messages), 422
| 22.9
| 41
| 0.80786
|
02f5bbc2822f7230aaddf25aaacbbef554b3e60a
| 9,955
|
py
|
Python
|
bco_api/api/migrations/0001_initial.py
|
syntheticgio/bco_api
|
b78c691f0508654aac66e6bfdde4fcd548a89ad1
|
[
"MIT"
] | null | null | null |
bco_api/api/migrations/0001_initial.py
|
syntheticgio/bco_api
|
b78c691f0508654aac66e6bfdde4fcd548a89ad1
|
[
"MIT"
] | null | null | null |
bco_api/api/migrations/0001_initial.py
|
syntheticgio/bco_api
|
b78c691f0508654aac66e6bfdde4fcd548a89ad1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-06-24 22:09
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='bco_draft_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='bco_publish_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='galaxy_draft_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='galaxy_publish_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='glygen_draft_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='glygen_publish_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='new_users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('temp_identifier', models.TextField(max_length=100)),
('token', models.TextField(blank=True, null=True)),
('hostname', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='oncomx_draft_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='oncomx_publish_meta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('n_objects', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='prefix_groups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prefix', models.CharField(max_length=5)),
('group_owner', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='px_groups',
fields=[
('prefix_groups_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='api.prefix_groups')),
],
bases=('api.prefix_groups',),
),
migrations.CreateModel(
name='oncomx_publish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='oncomx_draft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='glygen_publish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='glygen_draft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='galaxy_publish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='galaxy_draft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='bco_publish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='bco_draft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.TextField()),
('schema', models.TextField()),
('state', models.TextField()),
('contents', models.JSONField()),
('object_class', models.TextField(blank=True, null=True)),
('owner_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.group')),
],
options={
'abstract': False,
},
),
]
| 40.79918
| 201
| 0.512808
|
416aba56a0ee2a6393ae37234052d8c4c59651c9
| 10,581
|
py
|
Python
|
victoriaepi/seid.py
|
crissthiandi/Victoria
|
2d7e3621b472146a262745900ab143ba18ba0340
|
[
"BSD-3-Clause"
] | 3
|
2021-01-14T15:58:28.000Z
|
2021-02-17T18:16:59.000Z
|
victoriaepi/seid.py
|
crissthiandi/Victoria
|
2d7e3621b472146a262745900ab143ba18ba0340
|
[
"BSD-3-Clause"
] | 6
|
2021-02-11T18:38:03.000Z
|
2021-04-18T02:23:44.000Z
|
victoriaepi/seid.py
|
crissthiandi/Victoria
|
2d7e3621b472146a262745900ab143ba18ba0340
|
[
"BSD-3-Clause"
] | 4
|
2020-12-09T19:06:38.000Z
|
2021-03-28T12:38:19.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 3 20:32:32 2020
@author: jac
"""
import sys
import numpy as np
import scipy.stats as ss
from scipy import integrate
import matplotlib.pyplot as plt
from . import victoria
def odeint( rhs, X0, t_quad, args):
"""
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
Parameters
----------
rhs : callable(y, t, …)
Computes the derivative of y at t.
X0 : array
Initial condition on y (can be a vector).
t_quad : array
A sequence of time points for which to solve for y. The initial value point should be the first element of this sequence. This sequence must be monotonically increasing or monotonically decreasing; repeated values are allowed.
args : tuple
Extra arguments to pass to function.
Returns::
scipy.integrate.odeint( rhs, X0, t_quad, args)
"""
return integrate.odeint( rhs, X0, t_quad, args)
#return rungekuttaodeint( rhs, X0, t_quad, h=1/15, args=args, Method=3)[0]
def Model_SEID( m_list, e_list, prn=True):
"""
Define the graph matrix describing the model.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: Model_SEID
:lines: 1,10-
"""
T = victoria.AuxMatrix(names="S E I D", prn=prn)
T.BaseVar('S')
T.Exit( 'S', 'E')
T.Exit( 'E', 'I')
T.Exit( 'I', 'D')
T.NoExit('D')
T.End()
# Split in Erlang series of length m
T.SplitErlang( e_list, m_list)
return T
class SEID(victoria.mcmc):
"""
ODE model of a SEID model, and parameter inference using
Bayesian inference (MCMC with the twalk).
Args:
Region: Region
N: population size
data_fnam: Data file name, workdir/data/data_fnam
Make sure to process data into a vertical text array
out_fnam: MCMC output file name, without .txt, workdir/output/out_fnam + '.txt'
init_index: day number from data start, where to start the plot
init: date of init_index
R_rates: Residence rates
            trim: how many data points to trim
"""
def __init__( self, Region, N, data_fnam, out_fnam,\
init_index, init, R_rates, trim, workdir="./../"):
super().__init__(Region=Region, N=N, data_fnam=data_fnam, out_fnam=out_fnam,\
init_index=init_index, init=init, trim=trim, workdir=workdir)
self.Init_fm_matrix( m=self.data.shape[0], R_rates=R_rates)
def Init_fm_matrix( self, m, R_rates):
"""
Init the forward map.
Args:
m (int): is the number of observed days, ie. sample size.
            R_rates (dict): residence rates; each value is [rate, LaTeX label, Erlang series length m], e.g. `R_rates={ 'E': [1/1.5, r'\sigma_1', m_E], 'I^S': [1/2, r'\sigma_2', m_IS]}`
"""
self.num_pars = 2 # Number of parameters to be inferred: S(0) and contact rate
self.R_rates = R_rates
m_list=[self.R_rates[v][2] for v in self.R_rates.keys()]
e_list=list(self.R_rates.keys())
# Define the graph matrix describing the model (see above)
self.T = Model_SEID( m_list, e_list, prn=True)
# The rhs will be ('@' matrix multiplication of arrays in Python 3):
#Graph matrix State vars Erlang mask par (see below)
#rhs(x)= M @ x * E @ par
# qxq qx1 qxn nx1
# Before this, par is filled with non-linear terms etc. like the force of infection
# n original number of state variables
# q final number of state variables after Erlang series
self.n = self.T.n
self.q = self.T.q # Total number of state variables
# Call the base class Init_fm_matrix
# p=1 size of the return list for solve, daily deaths
        # quad_k=1 number of subdivisions per day, no quadrature is needed
super().Init_fm_matrix( m, num_state_vars=self.q, p=1, quad_k=1)
self.result_D = np.zeros(self.nn-1) # To hold result of quadrature
# ""S E I D""
self.par = np.zeros(self.n) # Par mask
self.R_rates = R_rates
# Known parameters, set residence rates:
for v in R_rates.keys():
self.par[self.T.ConvertVar(v)] = R_rates[v][0]
# The masks to select variables from list of state variables
self.mask_S = self.T.SelectMask('S')
"""Masks to select variables from list of state variables."""
self.mask_E = self.T.SelectMask('E')
"""Masks to select variables from list of state variables."""
self.mask_I = self.T.SelectMask('I')
"""Masks to select variables from list of state variables."""
self.mask_D = self.T.SelectMask('D')
"""Masks to select variables from list of state variables."""
self.X0 = np.zeros((self.q,))
def GetMask( self, v, E_range='all', as_col_vec=False):
"""Returns a mask to select variable `v` from `grp_list`.
E_range = [0] (default), first in the list, or original variable if no Erlang list.
E_range = 'all' use the whole Erlang list for variable
or provide E_range list manually.
"""
return self.T.SelectMask( v, E_range=E_range, as_col_vec=as_col_vec)
def rhs( self, x, t, p):
""" See :meth:`victoriaepi.seid.odeint` to check usage.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.rhs
:lines: 1,9-
"""
beta = p[1] #I(0) is p[0]
I = np.sum(x * self.mask_I) # total number of infectious
#force of infection beta1*I^A/N + some factor of beta1*I^S/N
foi = I/self.N * beta
self.par[self.T.ConvertVar('S')] = foi
return self.T.M @ (x * (self.T.par_mask @ self.par))
def solve_plain(self, p, quad=True):
"""
Solve the initial value problem.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.solve_plain
:lines: 1,9-
"""
self.X0 *= 0
self.X0 += p[0]*self.mask_I #Initial infected
        self.X0 += (self.N-p[0])*self.mask_S # susceptible
if quad:
return odeint(self.rhs, self.X0, self.t_quad, args=(p,))
else:
return odeint(self.rhs, self.X0, self.time, args=(p,))
def solve( self, p):
"""
Solve the initial value problem.
Integral of incidence between observation times
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.solve
:lines: 1,10-
"""
# Use the solver:
self.soln = self.solve_plain( p, quad=False )
return [np.diff(self.soln[::self.quad_k,:] @ self.mask_D)] # list of size self.p=1
def llikelihood( self, p):
"""
Log likelihood.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.llikelihood
:lines: 1,9-
"""
        # support(p) is not checked here: the twalk already checks support(p) before calling the energy
# negative binomial likelihood
mu_D = self.solve(p)[0]
mu_D +=3
# negative binomial likelihood for deaths
omega = 2.0
theta = 0.5 #antonio 0.5
r = mu_D/(omega-1.0+theta*mu_D)
q = 1.0/(omega+theta*mu_D)
log_likelihood = np.sum(ss.nbinom.logpmf( self.data+3,r,q))
return log_likelihood
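    # Note on the parametrization above (a sketch of the algebra, for clarity):
    # with r = mu/(omega - 1 + theta*mu) and q = 1/(omega + theta*mu),
    # scipy's nbinom(r, q) has
    #     mean     = r*(1 - q)/q    = mu
    #     variance = r*(1 - q)/q**2 = mu*(omega + theta*mu)
    # so omega and theta control the overdispersion around the modelled deaths mu_D.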
def lprior( self, p):
"""
Log prior.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.lprior
:lines: 1,9-
"""
# Log priors:
log_prior = 0.0
# gamma prior distribution parameters for I(0)
log_prior += ss.gamma.logpdf(p[0],1.0,scale=10.0)
# log-normal prior distribution parameters for beta
log_prior += np.sum(ss.lognorm.logpdf(p[1], 1.0, scale=1.0)) #scale=np.exp(0.0)
return log_prior
def support( self, p):
"""
Support.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.support
:lines: 1,10-
"""
rt = True
rt &= (0.0 < p[0] < 10.0**2)
# beta in [0,20]
rt &= all((0.0 < p[1]) * (p[3:] < 20.0))
return rt
def sim_init(self):
"""Simulate initial values for mcmc.
.. literalinclude:: ../victoriaepi/seid.py
:pyobject: SEID.sim_init
:lines: 1,8-
"""
p = np.zeros(self.num_pars)
p[0] = np.random.uniform(low = 0.01, high = 10.0)
p[1] = np.random.uniform(low = 0.01, high = 5.0)
return p
def PlotEvolution( self, pred, cumm=False, log=False, ax=None,\
csv_fnam=None, q=[ 10, 25, 50, 75, 90], blue=True, add_MRE=False,\
color='red', color_q='black', label='Mediana', right_axis=True, label_cases=True):
""" Plot Evolution.
Args:
pred: number of days to predict
cumm: True if cumulative, default False
log: True if y log scale, default False
ax: axis where to print the plot (optional)
csv_fnam: name of file to save the data for the plot (optional)
"""
        if ax is None:
fig = plt.figure(figsize=(12,10))
ax = fig.gca()
else:
fig = None
data = self.data # Deaths REPORTED
data_trimed = self.data_trimed
title = 'Deaths'
        # cumulative or prevalence; prepare solns
if cumm:
prevalence = np.cumsum(data) # aggregate observed data
self.future = prevalence[-1] + np.cumsum(data_trimed)
solns = self.solns[0]
ylabel = 'Accumulated cases'
title = 'Accumulated ' + title
else:
prevalence = data # aggregate observed data
self.future = data_trimed
solns = np.diff( np.append( np.zeros((self.solns[0].shape[0],1)), self.solns[0], axis=1), axis=1)
ylabel = 'Num. cases'
title = 'Incidence of ' + title
self.PlotEvolution_fm( solns=solns, prevalence=prevalence, pred=pred, ylabel=ylabel, log=log, ax=ax,\
csv_fnam=csv_fnam, q=q, blue=blue, add_MRE=add_MRE,\
color=color, color_q=color_q, label=label, right_axis=right_axis, label_cases=label_cases)
ax.set_title(self.Region + '. ' + title)
| 32.161094 | 234 | 0.570551 |
2cab92f8a097d461bca7ff25be6e257b54228b80 | 12,306 | py | Python | ghome/ghome.py | zendainc/ghome | 1bcbc5bef01945fbcd37f568d5a23c2645bff909 | ["Apache-2.0"] | null | null | null | ghome/ghome.py | zendainc/ghome | 1bcbc5bef01945fbcd37f568d5a23c2645bff909 | ["Apache-2.0"] | null | null | null | ghome/ghome.py | zendainc/ghome | 1bcbc5bef01945fbcd37f568d5a23c2645bff909 | ["Apache-2.0"] | null | null | null |
__copyright__ = """
Copyright 2019 Samapriya Roy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
#! /usr/bin/env python
import argparse,os,sys,platform,requests,time,json
import nmap
import socket
os.chdir(os.path.dirname(os.path.realpath(__file__)))
from os.path import expanduser
lpath=os.path.dirname(os.path.realpath(__file__))
sys.path.append(lpath)
# Find google home devices
def ghome(result):
# Using sockets to get iprange
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
iprange = '.'.join(s.getsockname()[0].split('.')[0:3])
s.close()
# Using nmap to get device list
nm = nmap.PortScanner()
scanner = nm.scan(iprange + '.*', '22-25')
l = []
for stuff, value in scanner['scan'].items():
try:
if str(value['addresses']['mac']).startswith('E4'):
l.append(str(value['addresses']['ipv4']))
except Exception as e:
pass
if result is not None and result == 'verbose':
for stuff in l:
url = "http://"+str(stuff)+":8008/setup/eureka_info"
querystring = {"{options}":"detail","{params}":"version,audio,name,build_info,detail,device_info,net,wifi,setup,settings,opt_in,opencast,multizone,proxy,night_mode_params,user_eq,room_equalizer","options":"detail"}
payload = "{\r\n \"connect\": true\r\n}"
headers = {
'Content-Type': "application/json",
'Cache-Control': "no-cache",
}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
resp=response.json()
print('Device Name: '+str(resp['name']))
print('Device Locale: '+resp['locale'])
print('Device build_version: '+str(resp['build_version']))
print('Device timezone: '+str(resp['timezone']))
print('Device model_name: '+str(resp['detail']['model_name']))
print('Device manufacturer: '+str(resp['detail']['manufacturer']))
print('Device cast_build_revision: '+str(resp['cast_build_revision']))
print('Device Mac address: '+str(resp['mac_address']))
print('Device IPv4 address: '+str(resp['ip_address']))
print('Wifi Name: '+str(resp['ssid']))
#print('Device uptime: '+str(resp['uptime']))
print('')
else:
for stuff in l:
url = "http://"+str(stuff)+":8008/setup/eureka_info"
querystring = {"{options}":"detail","{params}":"version,audio,name,build_info,detail,device_info,net,wifi,setup,settings,opt_in,opencast,multizone,proxy,night_mode_params,user_eq,room_equalizer","options":"detail"}
payload = "{\r\n \"connect\": true\r\n}"
headers = {
'Content-Type': "application/json",
'Cache-Control': "no-cache",
}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
resp=response.json()
print('Device Name: '+str(resp['name'])+' : '+str(resp['ip_address']))
def ghome_from_parser(args):
ghome(result=args.format)
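# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the UDP "connect"
# trick used at the top of ghome() to find the local /24 prefix, pulled out
# into a standalone helper. No packet is actually sent; connecting a UDP
# socket only selects the outgoing interface.
# ---------------------------------------------------------------------------
def _example_local_subnet_prefix():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return '.'.join(s.getsockname()[0].split('.')[0:3])
    finally:
        s.close()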
#Get all alarms set on the devices
def alarm(ip):
url = "http://"+str(ip)+":8008/setup/assistant/alarms"
try:
response = requests.request("GET", url).json()
if len(response['alarm']) !=0:
for items in response['alarm']:
print('Alarm set for: '+str(time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime(float(items['fire_time']/1000)))))
else:
print('No alarms currently set')
except Exception as e:
print(e)
def alarm_from_parser(args):
alarm(ip=args.ip)
#Get bluetooth status
def bstat(ip):
url = "http://"+str(ip)+":8008/setup/bluetooth/status"
try:
response = requests.request("GET", url).json()
for key, value in response.items():
print(key, value)
except Exception as e:
print(e)
def bstat_from_parser(args):
bstat(ip=args.ip)
#Get paired devices
def bpair(ip):
url = "http://"+str(ip)+":8008/setup/bluetooth/get_bonded"
try:
response = requests.request("GET", url).json()
for item in response:
print('Device Name: '+str(item['name']))
print('Item currently connected: '+str(item['connected']))
print('Last connected: '+str(time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime(float(item['last_connect_date']/1000)))))
print('')
except Exception as e:
print(e)
def bpair_from_parser(args):
bpair(ip=args.ip)
#Get paired devices
def bscan(ip):
scan_url = "http://"+str(ip)+":8008/setup/bluetooth/scan"
result_url="http://"+str(ip)+":8008/setup/bluetooth/scan_results"
payload='{"enable": true,"clear_results": false,"timeout": 60}'
headers = {'Content-Type': "application/json"}
try:
response = requests.request("POST", scan_url, data=payload, headers=headers)
if response.status_code==200:
print('\n'+'Scan for Bluetooth devices succeeded')
r = requests.request("GET", result_url).json()
for items in r:
if not len(items['name'])==0:
print(str(items['name']+' with mac id: '+str(items['mac_address'])))
else:
print('Unknown device with mac id: '+str(items['mac_address']))
#print(response['name'])
except Exception as e:
print(e)
def bscan_from_parser(args):
bscan(ip=args.ip)
# Bluetooth discovery enable or disable
def bdisc(ip,action):
if action=="enable":
payload = '{"enable_discovery": true}'
if action=="disable":
payload = '{"enable_discovery": false}'
url = "http://"+str(ip)+":8008/setup/bluetooth/discovery"
headers = {
'Content-Type': "application/json",
'Cache-Control': "no-cache",
}
try:
response = requests.request("POST", url, data=payload, headers=headers)
if response.status_code==200:
print("Bluetooth Discovery: "+str(action)+"d")
else:
print("Bluetooth Discovery: "+str(action)+"d failed with error: "+str(response.status_code))
except Exception as e:
print(e)
def bdisc_from_parser(args):
bdisc(ip=args.ip,action=args.action)
# Reboot device
def reboot(ip):
url = "http://"+str(ip)+":8008/setup/reboot"
headers = {
'Content-Type': "application/json",
'Cache-Control': "no-cache",
}
payload={"params": "now"}
try:
response = requests.request("POST", url, data=payload, headers=headers)
if response.status_code==200:
print("Device Rebooting")
else:
print("Device reboot failed with error: "+str(response.status_code))
except Exception as e:
print(e)
def reboot_from_parser(args):
reboot(ip=args.ip)
# DND device
def dnd(ip,action):
url = "http://"+str(ip)+":8008/setup/assistant/notifications"
headers = {
'Content-Type': "application/json",
'Cache-Control': "no-cache",
}
if action=="enable":
payload='{"notifications_enabled": true}'
if action=="disable":
payload='{"notifications_enabled": false}'
try:
response = requests.request("POST", url, data=payload, headers=headers)
if response.status_code==200:
r=response.json()
print("Notification status: "+str(action)+"d")
else:
print("DND action failed with action code : "+str(response.status_code))
except Exception as e:
print(e)
def dnd_from_parser(args):
dnd(ip=args.ip,action=args.action)
#Wifi scan
def wscan(ip):
scan_url = "http://"+str(ip)+":8008/setup/scan_wifi"
result_url="http://"+str(ip)+":8008/setup/scan_results"
headers = {'Content-Type': "application/json"}
try:
response = requests.request("POST", scan_url, headers=headers)
if response.status_code==200:
print('\n'+'Scan for Wifi succeeded')
r = requests.request("GET", result_url).json()
for items in r:
print('Wifi Name or SSID: '+str(items['ssid']))
#print(response['name'])
except Exception as e:
print(e)
def wscan_from_parser(args):
wscan(ip=args.ip)
spacing=" "
def main(args=None):
parser = argparse.ArgumentParser(description='Simple Google Home Mini Client')
subparsers = parser.add_subparsers()
parser_ghome = subparsers.add_parser('list', help='Lists all google home mini devices & IP address')
optional_named = parser_ghome.add_argument_group('Optional named arguments')
    optional_named.add_argument('--format', help='Use "verbose" to get details', default=None)
parser_ghome.set_defaults(func=ghome_from_parser)
parser_reboot = subparsers.add_parser('reboot', help='Reboot a google home mini using IP address')
required_named = parser_reboot.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
parser_reboot.set_defaults(func=reboot_from_parser)
parser_alarm = subparsers.add_parser('alarm', help='Print out the current alarms setup on your google home mini')
required_named = parser_alarm.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
parser_alarm.set_defaults(func=alarm_from_parser)
parser_dnd = subparsers.add_parser('dnd', help='Enable or disable <Do not Disturb mode> for a google home mini using IP address')
required_named = parser_dnd.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
required_named.add_argument('--action', help='enable|disable do not disturb mode', default=None)
parser_dnd.set_defaults(func=dnd_from_parser)
parser_bstat = subparsers.add_parser('bstat', help='Print current bluetooth status for a google home mini using IP address')
required_named = parser_bstat.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
parser_bstat.set_defaults(func=bstat_from_parser)
parser_bscan = subparsers.add_parser('bscan', help='Scan for Bluetooth devices near a google home mini using IP address')
required_named = parser_bscan.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
parser_bscan.set_defaults(func=bscan_from_parser)
parser_bpair = subparsers.add_parser('bpair', help='Print current paired bluetooth devices for a google home mini using IP address')
required_named = parser_bpair.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
parser_bpair.set_defaults(func=bpair_from_parser)
parser_bdisc = subparsers.add_parser('bdisc', help='Enable or disable bluetooth discovery for a google home mini using IP address')
required_named = parser_bdisc.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
required_named.add_argument('--action', help='enable|disable bluetooth discovery', default=None)
parser_bdisc.set_defaults(func=bdisc_from_parser)
parser_wscan = subparsers.add_parser('wscan', help='Scan for Wifi networks near a google home mini using IP address')
required_named = parser_wscan.add_argument_group('Required named arguments.')
required_named.add_argument('--ip', help='Use "ip" for Google Home Mini device', default=None)
parser_wscan.set_defaults(func=wscan_from_parser)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
| 40.084691 | 226 | 0.667317 |
3f6753f78a1ed310e623aa22c2c082d93983f314 | 9,605 | py | Python | chapter10/access_control.py | psxz/reinforcement-learning-an-introduction | cc839b647a3ae54f07b2cae17a602cd8d25436fe | ["Apache-2.0"] | 12,197 | 2016-10-04T03:34:49.000Z | 2022-03-31T12:55:36.000Z | chapter10/access_control.py | psxz/reinforcement-learning-an-introduction | cc839b647a3ae54f07b2cae17a602cd8d25436fe | ["Apache-2.0"] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | chapter10/access_control.py | psxz/reinforcement-learning-an-introduction | cc839b647a3ae54f07b2cae17a602cd8d25436fe | ["Apache-2.0"] | 4,738 | 2016-09-27T07:38:23.000Z | 2022-03-31T10:09:14.000Z |
#######################################################################
# Copyright (C) #
# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# 2016 Kenta Shimada(hyperkentakun@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
from mpl_toolkits.mplot3d.axes3d import Axes3D
from math import floor
import seaborn as sns
#######################################################################
# Following are some utilities for tile coding from Rich.
# To make each file self-contained, I copied them from
# http://incompleteideas.net/tiles/tiles3.py-remove
# with some naming convention changes
#
# Tile coding starts
class IHT:
"Structure to handle collisions"
def __init__(self, size_val):
self.size = size_val
self.overfull_count = 0
self.dictionary = {}
def count(self):
return len(self.dictionary)
def full(self):
return len(self.dictionary) >= self.size
def get_index(self, obj, read_only=False):
d = self.dictionary
if obj in d:
return d[obj]
elif read_only:
return None
size = self.size
count = self.count()
if count >= size:
if self.overfull_count == 0: print('IHT full, starting to allow collisions')
self.overfull_count += 1
return hash(obj) % self.size
else:
d[obj] = count
return count
def hash_coords(coordinates, m, read_only=False):
if isinstance(m, IHT): return m.get_index(tuple(coordinates), read_only)
if isinstance(m, int): return hash(tuple(coordinates)) % m
if m is None: return coordinates
def tiles(iht_or_size, num_tilings, floats, ints=None, read_only=False):
"""returns num-tilings tile indices corresponding to the floats and ints"""
if ints is None:
ints = []
qfloats = [floor(f * num_tilings) for f in floats]
tiles = []
for tiling in range(num_tilings):
tilingX2 = tiling * 2
coords = [tiling]
b = tiling
for q in qfloats:
coords.append((q + b) // num_tilings)
b += tilingX2
coords.extend(ints)
tiles.append(hash_coords(coords, iht_or_size, read_only))
return tiles
# Tile coding ends
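# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how the tile coder
# above is queried. Each call returns num_tilings indices, one active tile per
# tiling, and nearby (state, action) pairs share most of them. The state
# values below are made up.
# ---------------------------------------------------------------------------
def _tile_coding_example():
    iht = IHT(1024)
    active_a = tiles(iht, 8, [3.2, 1.5], [1])  # 8 indices for one (state, action)
    active_b = tiles(iht, 8, [3.3, 1.5], [1])  # a nearby state reuses most tiles
    return active_a, active_b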
#######################################################################
# possible priorities
PRIORITIES = np.arange(0, 4)
# reward for each priority
REWARDS = np.power(2, np.arange(0, 4))
# possible actions
REJECT = 0
ACCEPT = 1
ACTIONS = [REJECT, ACCEPT]
# total number of servers
NUM_OF_SERVERS = 10
# at each time step, a busy server will be free w.p. 0.06
PROBABILITY_FREE = 0.06
# step size for learning state-action value
ALPHA = 0.01
# step size for learning average reward
BETA = 0.01
# probability for exploration
EPSILON = 0.1
# a wrapper class for differential semi-gradient Sarsa state-action function
class ValueFunction:
# In this example I use the tiling software instead of implementing standard tiling by myself
# One important thing is that tiling is only a map from (state, action) to a series of indices
# It doesn't matter whether the indices have meaning, only if this map satisfy some property
# View the following webpage for more information
# http://incompleteideas.net/sutton/tiles/tiles3.html
# @alpha: step size for learning state-action value
# @beta: step size for learning average reward
def __init__(self, num_of_tilings, alpha=ALPHA, beta=BETA):
self.num_of_tilings = num_of_tilings
self.max_size = 2048
self.hash_table = IHT(self.max_size)
self.weights = np.zeros(self.max_size)
# state features needs scaling to satisfy the tile software
self.server_scale = self.num_of_tilings / float(NUM_OF_SERVERS)
self.priority_scale = self.num_of_tilings / float(len(PRIORITIES) - 1)
self.average_reward = 0.0
# divide step size equally to each tiling
self.alpha = alpha / self.num_of_tilings
self.beta = beta
# get indices of active tiles for given state and action
def get_active_tiles(self, free_servers, priority, action):
active_tiles = tiles(self.hash_table, self.num_of_tilings,
[self.server_scale * free_servers, self.priority_scale * priority],
[action])
return active_tiles
# estimate the value of given state and action without subtracting average
def value(self, free_servers, priority, action):
active_tiles = self.get_active_tiles(free_servers, priority, action)
return np.sum(self.weights[active_tiles])
# estimate the value of given state without subtracting average
def state_value(self, free_servers, priority):
values = [self.value(free_servers, priority, action) for action in ACTIONS]
# if no free server, can't accept
if free_servers == 0:
return values[REJECT]
return np.max(values)
# learn with given sequence
def learn(self, free_servers, priority, action, new_free_servers, new_priority, new_action, reward):
active_tiles = self.get_active_tiles(free_servers, priority, action)
estimation = np.sum(self.weights[active_tiles])
delta = reward - self.average_reward + self.value(new_free_servers, new_priority, new_action) - estimation
# update average reward
self.average_reward += self.beta * delta
delta *= self.alpha
for active_tile in active_tiles:
self.weights[active_tile] += delta
# get action based on epsilon greedy policy and @valueFunction
def get_action(free_servers, priority, value_function):
# if no free server, can't accept
if free_servers == 0:
return REJECT
if np.random.binomial(1, EPSILON) == 1:
return np.random.choice(ACTIONS)
values = [value_function.value(free_servers, priority, action) for action in ACTIONS]
return np.random.choice([action_ for action_, value_ in enumerate(values) if value_ == np.max(values)])
# take an action
def take_action(free_servers, priority, action):
if free_servers > 0 and action == ACCEPT:
free_servers -= 1
reward = REWARDS[priority] * action
# some busy servers may become free
busy_servers = NUM_OF_SERVERS - free_servers
free_servers += np.random.binomial(busy_servers, PROBABILITY_FREE)
return free_servers, np.random.choice(PRIORITIES), reward
# differential semi-gradient Sarsa
# @valueFunction: state value function to learn
# @maxSteps: step limit in the continuing task
def differential_semi_gradient_sarsa(value_function, max_steps):
current_free_servers = NUM_OF_SERVERS
current_priority = np.random.choice(PRIORITIES)
current_action = get_action(current_free_servers, current_priority, value_function)
# track the hit for each number of free servers
freq = np.zeros(NUM_OF_SERVERS + 1)
for _ in tqdm(range(max_steps)):
freq[current_free_servers] += 1
new_free_servers, new_priority, reward = take_action(current_free_servers, current_priority, current_action)
new_action = get_action(new_free_servers, new_priority, value_function)
value_function.learn(current_free_servers, current_priority, current_action,
new_free_servers, new_priority, new_action, reward)
current_free_servers = new_free_servers
current_priority = new_priority
current_action = new_action
print('Frequency of number of free servers:')
print(freq / max_steps)
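# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the differential
# semi-gradient Sarsa update carried out in ValueFunction.learn, written out
# for a single weight so the two step sizes are easy to see:
#     delta  = R - R_bar + q(S', A') - q(S, A)
#     R_bar += beta  * delta
#     w     += alpha * delta      (for each active tile)
# ---------------------------------------------------------------------------
def _scalar_sarsa_update(w, average_reward, reward, q_next, q_current,
                         alpha=ALPHA, beta=BETA):
    delta = reward - average_reward + q_next - q_current
    return w + alpha * delta, average_reward + beta * delta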
# Figure 10.5, Differential semi-gradient Sarsa on the access-control queuing task
def figure_10_5():
max_steps = int(1e6)
# use tile coding with 8 tilings
num_of_tilings = 8
value_function = ValueFunction(num_of_tilings)
differential_semi_gradient_sarsa(value_function, max_steps)
values = np.zeros((len(PRIORITIES), NUM_OF_SERVERS + 1))
for priority in PRIORITIES:
for free_servers in range(NUM_OF_SERVERS + 1):
values[priority, free_servers] = value_function.state_value(free_servers, priority)
fig = plt.figure(figsize=(10, 20))
plt.subplot(2, 1, 1)
for priority in PRIORITIES:
plt.plot(range(NUM_OF_SERVERS + 1), values[priority, :], label='priority %d' % (REWARDS[priority]))
plt.xlabel('Number of free servers')
plt.ylabel('Differential value of best action')
plt.legend()
ax = fig.add_subplot(2, 1, 2)
policy = np.zeros((len(PRIORITIES), NUM_OF_SERVERS + 1))
for priority in PRIORITIES:
for free_servers in range(NUM_OF_SERVERS + 1):
values = [value_function.value(free_servers, priority, action) for action in ACTIONS]
if free_servers == 0:
policy[priority, free_servers] = REJECT
else:
policy[priority, free_servers] = np.argmax(values)
fig = sns.heatmap(policy, cmap="YlGnBu", ax=ax, xticklabels=range(NUM_OF_SERVERS + 1), yticklabels=PRIORITIES)
fig.set_title('Policy (0 Reject, 1 Accept)')
fig.set_xlabel('Number of free servers')
fig.set_ylabel('Priority')
plt.savefig('../images/figure_10_5.png')
plt.close()
if __name__ == '__main__':
figure_10_5()
| 39.690083 | 116 | 0.659552 |
72e5f725cc940e887a073d2c2f51e2cd22b4863a | 85 | py | Python | carro/apps.py | JoseRazo/TiendaOnline | cd6d605d1ed4d8c3b03777f7b05cedaca8db4624 | ["Apache-2.0"] | null | null | null | carro/apps.py | JoseRazo/TiendaOnline | cd6d605d1ed4d8c3b03777f7b05cedaca8db4624 | ["Apache-2.0"] | null | null | null | carro/apps.py | JoseRazo/TiendaOnline | cd6d605d1ed4d8c3b03777f7b05cedaca8db4624 | ["Apache-2.0"] | null | null | null |
from django.apps import AppConfig
class CarroConfig(AppConfig):
name = 'carro'
| 14.166667 | 33 | 0.741176 |
2448fffe8929cf7ce6c111f4dd9e454dc4576d70 | 7,739 | py | Python | tests/python/unittest/test_tir_transform_vectorize.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | tests/python/unittest/test_tir_transform_vectorize.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 2,863 | 2017-08-17T19:55:50.000Z | 2019-11-04T17:18:41.000Z | tests/python/unittest/test_tir_transform_vectorize.py | shengxinhu/tvm | 06c443e9959452c6da3a911fe0c11e08c5554477 | ["Zlib", "Unlicense", "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_vectorize_loop():
dtype = "int64"
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, kind="vectorize") as j:
A[j] = tvm.tir.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
assert not isinstance(stmt.body, tvm.tir.For)
assert len(stmt.body.indices) == 1
assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_vector():
dtype = "int64"
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32x4", name="A")
with ib.for_range(0, n) as i:
with ib.for_range(0, 4, kind="vectorize") as j:
A[j] = tvm.tir.const(1, A.dtype)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
assert not isinstance(stmt.body, tvm.tir.For)
assert len(stmt.body.indices) == 1
assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_with_if():
n = te.var("n")
x = te.var("x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(x < n):
A[i] = A[i] + 1
with ib.else_scope():
with ib.if_scope(i < n):
A[i] = 2.0
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.IfThenElse)
assert len(stmt.then_case.indices) == 1
assert isinstance(stmt.then_case.indices[0], tvm.tir.Ramp)
assert isinstance(stmt.then_case.value, tvm.tir.Add)
assert stmt.then_case.value.dtype == "float32x4"
assert isinstance(stmt.else_case, tvm.tir.For)
def test_vectorize_with_if_cond_int64():
m = te.size_var("m", dtype="int64")
A = te.placeholder((m,), name="A", dtype="float32")
B = te.compute((m,), lambda i: te.if_then_else(i < 2, A[i], A[i] * 2), name="B")
s = te.create_schedule(B.op)
x, y = s[B].split(B.op.axis[0], factor=4)
s[B].vectorize(y)
f = tvm.build(s, [A, B], "llvm")
def test_vectorize_let():
v = tvm.tir.Var("v", "float32")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
ib.emit(lambda body: tvm.tir.LetStmt(v, A[i] + 1, body))
A[i] = v + 2
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], ib.get()))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.LetStmt)
assert stmt.value.dtype == "float32x4"
def test_vectorize_with_le_cond():
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(i <= n):
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
def test_vectorize_with_ge_cond():
n = te.var("n")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
with ib.if_scope(i >= n):
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
def test_vectorize_if_then_else():
n = te.var("n")
x = te.var("x")
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, 4, kind="vectorize") as i:
A[i] = tvm.tir.call_intrin("float32", "tir.if_then_else", i > 0, A[i] + 1, A[i])
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert isinstance(stmt, tvm.tir.For)
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
with ib.for_range(0, n) as k:
with ib.for_range(0, 4, kind="vectorize") as i:
A[k * 4 + i] = tvm.tir.call_intrin(
"float32", "tir.if_then_else", k > 0, A[k * 4 + i], 0
)
stmt = ib.get()
assert isinstance(stmt.body, tvm.tir.For)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body
assert not isinstance(stmt.body, tvm.tir.For)
assert isinstance(stmt.body.value.args[2], tvm.tir.Broadcast)
def test_vectorize_while_fail():
"""A while loop inside a vectorized loop should fail."""
n = 64
num_iter = 10
def test_ir(A, B, C):
ib = tvm.tir.ir_builder.create()
n = C.shape[0]
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
C = ib.buffer_ptr(C)
i = ib.allocate("int32", (1,), name="i", scope="local")
i[0] = 0
with ib.for_range(0, n) as j:
C[j] = 0.0
with ib.for_range(0, n, kind="vectorize") as j:
with ib.while_loop(i[0] < num_iter):
C[j] += A[j] + B[j]
i[0] += 1
return ib.get()
dtype = "float32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.placeholder((n,), name="B", dtype=dtype)
C = te.extern(
(n,),
[A, B],
lambda ins, outs: test_ir(ins[0], ins[1], outs[0]),
name="while_vectorize",
dtype=dtype,
)
s = te.create_schedule(C.op)
try:
tvm.lower(s, [A, B, C], "llvm")
assert False
except tvm.error.TVMError as e:
error_msg = str(e).split("\n")[-1]
expected = "A while loop inside a vectorized loop not supported"
assert expected in error_msg
def test_vectorize_dtype_mismatch():
n = tvm.tir.IntImm("int64", 4)
A = te.compute((n,), lambda i: tvm.tir.IntImm("int64", 2**31 - 1) + i, name="A")
s = te.create_schedule(A.op)
s[A].vectorize(A.op.axis[0])
tvm.lower(s, [A], "llvm", simple_mode=True)
if __name__ == "__main__":
test_vectorize_vector()
test_vectorize_with_if()
test_vectorize_loop()
test_vectorize_if_then_else()
test_vectorize_with_le_cond()
test_vectorize_with_ge_cond()
test_vectorize_let()
test_vectorize_while_fail()
test_vectorize_dtype_mismatch()
| 32.380753 | 88 | 0.617522 |
a3c2072b136e546c58b20afcc9d304aa6dfbb734 | 4,695 | py | Python | mac/google-cloud-sdk/lib/googlecloudsdk/api_lib/iamcredentials/util.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | ["Apache-2.0"] | null | null | null | mac/google-cloud-sdk/lib/googlecloudsdk/api_lib/iamcredentials/util.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | ["Apache-2.0"] | null | null | null | mac/google-cloud-sdk/lib/googlecloudsdk/api_lib/iamcredentials/util.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the iamcredentials API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
from googlecloudsdk.api_lib.util import apis_internal
from googlecloudsdk.core import resources
from googlecloudsdk.core.credentials import http as http_creds
from oauth2client import client
def GenerateAccessToken(service_account_id, scopes):
"""Generates an access token for the given service account."""
service_account_ref = resources.REGISTRY.Parse(
service_account_id, collection='iamcredentials.serviceAccounts',
params={'projectsId': '-', 'serviceAccountsId': service_account_id})
# pylint: disable=protected-access
http_client = http_creds.Http(
response_encoding=http_creds.ENCODING,
allow_account_impersonation=False, force_resource_quota=True)
iam_client = apis_internal._GetClientInstance(
'iamcredentials', 'v1', http_client=http_client)
response = iam_client.projects_serviceAccounts.GenerateAccessToken(
iam_client.MESSAGES_MODULE
.IamcredentialsProjectsServiceAccountsGenerateAccessTokenRequest(
name=service_account_ref.RelativeName(),
generateAccessTokenRequest=iam_client.MESSAGES_MODULE
.GenerateAccessTokenRequest(scope=scopes)
)
)
return response
def GenerateIdToken(service_account_id, audience, include_email=False):
"""Generates an id token for the given service account."""
service_account_ref = resources.REGISTRY.Parse(
service_account_id, collection='iamcredentials.serviceAccounts',
params={'projectsId': '-', 'serviceAccountsId': service_account_id})
# pylint: disable=protected-access
http_client = http_creds.Http(
response_encoding=http_creds.ENCODING,
allow_account_impersonation=False, force_resource_quota=True)
iam_client = apis_internal._GetClientInstance(
'iamcredentials', 'v1', http_client=http_client)
response = iam_client.projects_serviceAccounts.GenerateIdToken(
iam_client.MESSAGES_MODULE
.IamcredentialsProjectsServiceAccountsGenerateIdTokenRequest(
name=service_account_ref.RelativeName(),
generateIdTokenRequest=iam_client.MESSAGES_MODULE
.GenerateIdTokenRequest(audience=audience, includeEmail=include_email)
)
)
return response.token
class ImpersonationAccessTokenProvider(object):
"""A token provider for service account elevation.
This supports the interface required by the core/credentials module.
"""
def GetElevationAccessToken(self, service_account_id, scopes):
response = GenerateAccessToken(service_account_id, scopes)
return ImpersonationCredentials(
service_account_id, response.accessToken, response.expireTime, scopes)
def GetElevationIdToken(self, service_account_id, audience, include_email):
return GenerateIdToken(service_account_id, audience, include_email)
@classmethod
def IsImpersonationCredential(cls, cred):
return isinstance(cred, ImpersonationCredentials)
class ImpersonationCredentials(client.OAuth2Credentials):
"""Implementation of a credential that refreshes using the iamcredentials API.
"""
_EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def __init__(self, service_account_id, access_token, token_expiry, scopes):
self._service_account_id = service_account_id
token_expiry = self._ConvertExpiryTime(token_expiry)
super(ImpersonationCredentials, self).__init__(
access_token, None, None, None, token_expiry, None, None, scopes=scopes)
def _refresh(self, http):
# client.Oauth2Credentials converts scopes into a set, so we need to convert
# back to a list before making the API request.
response = GenerateAccessToken(self._service_account_id, list(self.scopes))
self.access_token = response.accessToken
self.token_expiry = self._ConvertExpiryTime(response.expireTime)
def _ConvertExpiryTime(self, value):
return datetime.datetime.strptime(value,
ImpersonationCredentials._EXPIRY_FORMAT)
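# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the expiry strings
# handled by _ConvertExpiryTime are RFC3339-style UTC timestamps; the value
# below is made up.
# ---------------------------------------------------------------------------
def _example_parse_expiry(value='2021-01-01T00:30:00Z'):
  return datetime.datetime.strptime(value, ImpersonationCredentials._EXPIRY_FORMAT)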
| 40.128205 | 80 | 0.771885 |
ebea323c7105a51cc0c5c47dd68359cc4a063fda | 7,090 | py | Python | feature_extraction/generate_features_alexnet.py | cJarvers/algonauts2021 | 0c1d6b9c6cbff77ecb1365eaaef0956a4bab96aa | ["MIT"] | 35 | 2021-05-01T02:58:03.000Z | 2021-12-27T15:13:17.000Z | feature_extraction/generate_features_alexnet.py | cJarvers/algonauts2021 | 0c1d6b9c6cbff77ecb1365eaaef0956a4bab96aa | ["MIT"] | 2 | 2021-06-05T17:34:29.000Z | 2021-06-22T20:36:19.000Z | feature_extraction/generate_features_alexnet.py | Neural-Dynamics-of-Visual-Cognition-FUB/Algonauts2021_devkit | 61c71a39f592861482ef4bfacf91faf981ea3ed3 | ["MIT"] | 21 | 2021-05-06T04:59:09.000Z | 2021-10-12T21:52:09.000Z |
###
# This file will:
# 1. Generate and save Alexnet features in a given folder
# 2. preprocess Alexnet features using PCA and save them in another folder
###
import glob
from alexnet import *
import numpy as np
import urllib
import torch
import cv2
import argparse
import time
import random
from tqdm import tqdm
from torchvision import transforms as trn
import os
from PIL import Image
from sklearn.preprocessing import StandardScaler
from torch.autograd import Variable as V
from sklearn.decomposition import PCA, IncrementalPCA
from decord import VideoReader
from decord import cpu
seed = 42
# Torch RNG
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Python RNG
np.random.seed(seed)
random.seed(seed)
def load_alexnet(model_checkpoints):
"""This function initializes an Alexnet and load
its weights from a pretrained model
----------
model_checkpoints : str
model checkpoints location.
Returns
-------
model
pytorch model of alexnet
"""
model = alexnet()
model_file = model_checkpoints
checkpoint = torch.load(model_file, map_location=lambda storage, loc: storage)
model_dict =["conv1.0.weight", "conv1.0.bias", "conv2.0.weight", "conv2.0.bias", "conv3.0.weight", "conv3.0.bias", "conv4.0.weight", "conv4.0.bias", "conv5.0.weight", "conv5.0.bias", "fc6.1.weight", "fc6.1.bias", "fc7.1.weight", "fc7.1.bias", "fc8.1.weight", "fc8.1.bias"]
state_dict={}
i=0
for k,v in checkpoint.items():
state_dict[model_dict[i]] = v
i+=1
model.load_state_dict(state_dict)
if torch.cuda.is_available():
model.cuda()
model.eval()
return model
def sample_video_from_mp4(file, num_frames=16):
"""This function takes a mp4 video file as input and returns
a list of uniformly sampled frames (PIL Image).
Parameters
----------
file : str
path to mp4 video file
num_frames : int
how many frames to select using uniform frame sampling.
Returns
-------
images: list of PIL Images
num_frames: int
number of frames extracted
"""
images = list()
vr = VideoReader(file, ctx=cpu(0))
total_frames = len(vr)
    indices = np.linspace(0, total_frames - 1, num_frames, dtype=int)
for seg_ind in indices:
images.append(Image.fromarray(vr[seg_ind].asnumpy()))
return images,num_frames
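# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the uniform frame
# sampling rule used above, on plain numbers. For a hypothetical 100-frame
# clip and 16 samples the chosen indices run evenly from the first frame to
# the last.
# ---------------------------------------------------------------------------
def _example_uniform_indices(total_frames=100, num_frames=16):
    return np.linspace(0, total_frames - 1, num_frames, dtype=int)
    # -> 16 indices spread evenly between 0 and 99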
def get_activations_and_save(model, video_list, activations_dir):
"""This function generates Alexnet features and save them in a specified directory.
Parameters
----------
model :
pytorch model : alexnet.
video_list : list
the list contains path to all videos.
activations_dir : str
save path for extracted features.
"""
resize_normalize = trn.Compose([
trn.Resize((224,224)),
trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
for video_file in tqdm(video_list):
vid,num_frames = sample_video_from_mp4(video_file)
video_file_name = os.path.split(video_file)[-1].split(".")[0]
activations = []
for frame,img in enumerate(vid):
input_img = V(resize_normalize(img).unsqueeze(0))
if torch.cuda.is_available():
input_img=input_img.cuda()
x = model.forward(input_img)
for i,feat in enumerate(x):
if frame==0:
activations.append(feat.data.cpu().numpy().ravel())
else:
activations[i] = activations[i] + feat.data.cpu().numpy().ravel()
for layer in range(len(activations)):
save_path = os.path.join(activations_dir, video_file_name+"_"+"layer" + "_" + str(layer+1) + ".npy")
avg_layer_activation = activations[layer]/float(num_frames)
np.save(save_path,avg_layer_activation)
def do_PCA_and_save(activations_dir, save_dir):
"""This function preprocesses Neural Network features using PCA and save the results
in a specified directory
.
Parameters
----------
activations_dir : str
save path for extracted features.
save_dir : str
save path for extracted PCA features.
"""
layers = ['layer_1','layer_2','layer_3','layer_4','layer_5','layer_6','layer_7','layer_8']
n_components = 100
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for layer in tqdm(layers):
activations_file_list = glob.glob(activations_dir +'/*'+layer+'.npy')
activations_file_list.sort()
feature_dim = np.load(activations_file_list[0])
x = np.zeros((len(activations_file_list),feature_dim.shape[0]))
for i,activation_file in enumerate(activations_file_list):
temp = np.load(activation_file)
x[i,:] = temp
x_train = x[:1000,:]
x_test = x[1000:,:]
start_time = time.time()
x_test = StandardScaler().fit_transform(x_test)
x_train = StandardScaler().fit_transform(x_train)
ipca = PCA(n_components=n_components,random_state=seed)
ipca.fit(x_train)
x_train = ipca.transform(x_train)
x_test = ipca.transform(x_test)
train_save_path = os.path.join(save_dir,"train_"+layer)
test_save_path = os.path.join(save_dir,"test_"+layer)
np.save(train_save_path,x_train)
np.save(test_save_path,x_test)
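# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the same
# standardise-then-project pipeline used in do_PCA_and_save, on random data,
# so the expected output shape is easy to see. The sizes are made up.
# ---------------------------------------------------------------------------
def _example_pca_pipeline(n_samples=1000, n_features=512, n_components=100):
    rng = np.random.RandomState(seed)
    x = StandardScaler().fit_transform(rng.rand(n_samples, n_features))
    return PCA(n_components=n_components, random_state=seed).fit_transform(x)
    # -> array of shape (n_samples, n_components)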
def main():
parser = argparse.ArgumentParser(description='Feature Extraction from Alexnet and preprocessing using PCA')
parser.add_argument('-vdir','--video_data_dir', help='video data directory',default = './AlgonautsVideos268_All_30fpsmax/', type=str)
parser.add_argument('-sdir','--save_dir', help='saves processed features',default = './alexnet', type=str)
args = vars(parser.parse_args())
save_dir=args['save_dir']
if not os.path.exists(save_dir):
os.makedirs(save_dir)
video_dir = args['video_data_dir']
video_list = glob.glob(video_dir + '/*.mp4')
video_list.sort()
print('Total Number of Videos: ', len(video_list))
# load Alexnet
# Download pretrained Alexnet from:
# https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth
# and save in the current directory
checkpoint_path = "./alexnet.pth"
if not os.path.exists(checkpoint_path):
url = "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth"
urllib.request.urlretrieve(url, "./alexnet.pth")
model = load_alexnet(checkpoint_path)
# get and save activations
activations_dir = os.path.join(save_dir)
if not os.path.exists(activations_dir):
os.makedirs(activations_dir)
print("-------------Saving activations ----------------------------")
get_activations_and_save(model, video_list, activations_dir)
# preprocessing using PCA and save
pca_dir = os.path.join(save_dir, 'pca_100')
print("-------------performing PCA----------------------------")
do_PCA_and_save(activations_dir, pca_dir)
if __name__ == "__main__":
main()
| 33.923445 | 276 | 0.654866 |
c78578445adb3b7fb810435dd468db9d06f1ef07 | 1,831 | py | Python | knowledge_graph/knowledge_extraction/oie.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | ["MIT"] | 3 | 2021-04-28T07:19:39.000Z | 2022-03-07T09:34:19.000Z | knowledge_graph/knowledge_extraction/oie.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | ["MIT"] | 18 | 2020-08-24T12:40:38.000Z | 2022-03-12T00:47:14.000Z | knowledge_graph/knowledge_extraction/oie.py | ihaeyong/drama-graph | 60c3c216cd74bb19efd6baf836f6c7c2b42b764f | ["MIT"] | 1 | 2020-10-15T10:09:20.000Z | 2020-10-15T10:09:20.000Z |
from openie import StanfordOpenIE
from stanfordcorenlp import StanfordCoreNLP
import json
def stanfordOIE(texts):
with StanfordOpenIE() as client:
result = []
for text in texts:
result.append(client.annotate(text))
return result
class oie:
def __init__(self, config, input, nlp_parser):
self.input = input
self.config = config
self.nlp_parser = nlp_parser
self.output = self.run()
def run(self):
        if self.nlp_parser is None:
if self.config['mode'] == 'demo':
return self.input
self.nlp_parser = StanfordCoreNLP('data/stanford-corenlp-4.0.0')
for ep in self.input:
for scene in ep:
for u in scene['scene']:
for sent in u['sents']:
if self.config['extraction']['oie'] == 'None':
sent['triples'] = []
else:
output = self.nlp_parser.annotate(sent['statement'], properties={
'annotators': 'openie',
'outputFormat': 'json'
})
output = json.loads(output)
sent['triples'] = []
for s in output['sentences']:
for result in s['openie']:
del result['subjectSpan']
del result['relationSpan']
del result['objectSpan']
sent['triples'] += s['openie']
# sent['triples'] = client.annotate(sent['statement'])
print('Stanford Open IE done..')
return self.input
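# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): after the three span
# fields are deleted above, each entry appended to sent['triples'] is expected
# to be a plain subject/relation/object dictionary. The sentence is made up.
# ---------------------------------------------------------------------------
_EXAMPLE_TRIPLE = {'subject': 'Haeyong', 'relation': 'lives in', 'object': 'Seoul'}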
| 36.62 | 93 | 0.452758 |
84d4b7b1f64b985222dee881fbad049a10b1d4c8 | 20,596 | py | Python | data/dataset_factory.py | burhanmudassar/pytorch-action-detection | 16afb9312248d73c0e2be56ac733e0a33040307e | ["MIT"] | null | null | null | data/dataset_factory.py | burhanmudassar/pytorch-action-detection | 16afb9312248d73c0e2be56ac733e0a33040307e | ["MIT"] | null | null | null | data/dataset_factory.py | burhanmudassar/pytorch-action-detection | 16afb9312248d73c0e2be56ac733e0a33040307e | ["MIT"] | null | null | null |
"""
Dataset utilities - Dataset Class for dataloading
"""
import os
import os.path
import torch
import torch.utils.data as data
import cv2, pickle
import numpy as np
import random
def readsplitfile(splitfile):
with open(splitfile, 'r') as f:
temptrainvideos = f.readlines()
trainvideos = []
for vid in temptrainvideos:
vid = vid.rstrip('\n')
trainvideos.append(vid)
return trainvideos
class AnnotationTransform(object):
"""
Same as original
Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of UCF24's 24 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, CLASSES=None, class_to_ind=None, keep_difficult=False):
# self.class_to_ind = class_to_ind or dict(
# zip(CLASSES, range(len(CLASSES))))
# self.ind_to_class = dict(zip(range(len(CLASSES)),CLASSES))
pass
def __call__(self, bboxs, labels, width, height):
res = []
scale = np.asarray([[width, height, width, height]], dtype=np.float32)
for t in range(len(labels)):
bbox = bboxs[t,:]
label = labels[t]
'''pts = ['xmin', 'ymin', 'xmax', 'ymax']'''
bndbox = []
bbox = np.maximum(0, bbox.astype(np.int32) - 1)
bbox = np.minimum(scale, bbox)
bbox = bbox.astype(np.float32) / scale
bndbox.append(bbox)
bndbox.append(label)
res += [bndbox]
# for i in range(4):
# cur_pt = max(0,int(bbox[i]) - 1)
# scale = width if i % 2 == 0 else height
# cur_pt = min(scale, int(bbox[i]))
# cur_pt = float(cur_pt) / scale
# bndbox.append(cur_pt)
# bndbox.append(label)
# res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
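# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): what the transform
# above does to a single pixel box, on plain numbers. A (10, 20, 110, 220)
# box in a hypothetical 320x240 frame becomes coordinates normalised to [0, 1].
# ---------------------------------------------------------------------------
def _example_normalise_box(width=320, height=240):
    scale = np.asarray([[width, height, width, height]], dtype=np.float32)
    box = np.asarray([10, 20, 110, 220], dtype=np.float32)
    box = np.maximum(0, box.astype(np.int32) - 1)
    box = np.minimum(scale, box)
    return box.astype(np.float32) / scale
    # -> approx. [[0.028, 0.079, 0.341, 0.913]]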
def detection_collate_tubelet(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
image_ids = []
for sample in batch:
imgs.append(sample[0])
# targets.append([torch.FloatTensor(target_frame) for target_frame in sample[1]])
targets.append([torch.FloatTensor(t) for t in sample[1]])
image_ids.append(sample[2])
return torch.stack(imgs, 0), targets, image_ids
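# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the collate
# function above is meant to be handed to a DataLoader; the dataset argument
# and batch size are placeholders.
# ---------------------------------------------------------------------------
def _example_make_loader(dataset, batch_size=8):
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
                           collate_fn=detection_collate_tubelet)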
class DatasetClass():
"""
Abstract Class for Data Loading based on the Torch Dataset Class
"""
def __init__(self, name='UCF101', path='../data/ucf24/', split=1):
self.name = name
self.root = path
self.split = split
# Load annotations dictionary to memory
self.load_annotations_to_memory()
self.database = self.get_database()
def get_database(self):
raise NotImplementedError
def readsplitfile(self):
raise NotImplementedError
def get_train_test_ratios(self):
ratios = np.array([1] * len(self.CLASSES)) # TODO:use this to calculate train/test ratios
return ratios
def get_video_annotations(self, videoname):
raise NotImplementedError
def get_video_numf(self, videoname):
raise NotImplementedError
def get_video_act(self, videoname):
raise NotImplementedError
def get_video_tubes(self, videoname):
annotations = self.get_video_annotations(videoname)
actidx = self.get_video_act(videoname)
numf = self.get_video_numf(videoname)
num_tubes = len(annotations)
        tube_labels = np.zeros((numf, num_tubes), dtype=np.int16)  # per-frame label of each tube (0 = tube not present in that frame)
tube_boxes = [[[] for _ in range(num_tubes)] for _ in range(numf)]
for tubeid, tube in enumerate(annotations):
# print('numf00', numf, tube['sf'], tube['ef'])
for frame_id in range(tube.shape[0]): # start of the tube to end frame of the tube
frame_num = int(tube[frame_id, 0] - 1)
label = actidx
# assert actidx == label, 'Tube label and video label should be same'
box = tube[frame_id, 1:] # get the box as an array
box = box.astype(np.float32)
# Already in x1 y1 x2 y2 format
try:
tube_labels[frame_num, tubeid] = label+1 # change label in tube_labels matrix to 1 form 0
tube_boxes[frame_num][tubeid] = box # put the box in matrix of lists
except IndexError:
print('Out of bounds annotations')
print('Video: {} Numf: {:d} and Tube Frame: {:d}'.format(videoname, numf, frame_num))
return tube_boxes, tube_labels
def get_frame_path(self, videoname, frame_num):
if self.input_type == 'rgb':
return self.framepath_rgb(videoname, frame_num)
elif self.input_type == 'brox':
return self.framepath_brox(videoname, frame_num)
elif self.input_type == 'fusion':
image_name = [self.framepath_rgb(videoname, frame_num), self.framepath_brox(videoname, frame_num)]
return image_name
def framepath_rgb(self, vid_name, frame_ind):
raise NotImplementedError
def framepath_brox(self, vid_name, frame_ind):
raise NotImplementedError
def get_frame_annotations(self, videoname, frame_num):
# Get frame annotations for 1-indexed frames?
tube_boxes, tube_labels = self.get_video_tubes(videoname)
return np.asarray(tube_boxes[frame_num]), np.asarray(tube_labels[frame_num])
def get_resolution(self, videoname):
'''
return width x height
'''
raise NotImplementedError
def frame_format(self, v, i):
raise NotImplementedError
# Merge from ds_utils
def vlist(self, split):
if split=='train':
return self.trainSet
elif split=='val':
return self.valSet
else:
return self.testSet
def load_annotations_to_memory(self):
raise NotImplementedError
def gtTubes(self, vid_name):
'''
:param vid_name: name of video
:return: tubes corresponding to that video. Dict indexed by class label
Each member contains multiple N x 5 arrays [frame index, boxes]
box format <x1 y1 x2 y2>
'''
return self.database['gttubes'][vid_name]
# Merge from ds_utils
def nframes(self, vid_name):
return self.get_video_numf(vid_name)
def tubes_unrolled(self, vid_name):
gttubes=self.gtTubes(vid_name)
allTubes = []
tube_ind = 0
for label, tubes in gttubes.items():
for tube in tubes:
lentube = tube.shape[0]
ind_tube = np.expand_dims(np.asarray([tube_ind] * lentube), 1)
label_tube = np.expand_dims(np.asarray([label] * lentube), 1)
final_tube = np.concatenate([ind_tube, label_tube, tube], axis=1)
allTubes.append(final_tube)
tube_ind += 1
return allTubes
class Dataset_plus_Torch_Class(data.Dataset, DatasetClass):
def __init__(self, name='UCF101', path='../data/ucf24/', split=1):
DatasetClass.__init__(self, name=name, path=path, split=split)
@staticmethod
def fromtorch(cls, root, image_set, transform=None, target_transform=None,
dataset_name='ucf24', input_type='rgb', full_test=False, num_K=1, split=1, interval=1):
newcls = cls('', root, split)
newcls.torchinit(root, image_set, transform, target_transform,
dataset_name, input_type, full_test, num_K, split, interval)
return newcls
def torchinit(self, root, image_set, transform=None, target_transform=None,
dataset_name='ucf24', input_type='rgb', full_test=False, num_K=1, split=1, interval=1):
self.splitfile = root + 'splitfiles/trainlist{:02d}.txt'.format(split)
self.input_type = input_type
if input_type == 'fusion':
self._imgpath = [os.path.join(root, i) for i in ['rgb-images', 'brox-images']]
else:
self._imgpath = os.path.join(root, self.input_type + '-images')
self.root = root
self.image_set = image_set
self.transform = transform
self.target_transform = target_transform
self.name = dataset_name
self._annopath = os.path.join(root, 'labels/', '%s.txt')
self.ids = list()
self.K = num_K
self.split = split
self.interval = interval
annotationlist, video_list = self.make_lists(self, fulltest=full_test)
self.video_list = video_list
self.ids = annotationlist
@staticmethod
def make_lists(dataset, fulltest=False):
trainvideos, valvideos, testvideos = dataset.readsplitfile()
istrain = False
if dataset.image_set == 'train':
allvideos = trainvideos
istrain = True
elif dataset.image_set == 'val':
allvideos = valvideos
elif dataset.image_set == 'test':
allvideos = testvideos
else:
print("Invalid Image Set")
raise ValueError
annotationlist = []
# with open(rootpath + '')
action_counts = np.zeros(len(dataset.CLASSES), dtype=np.int32)
ratios = dataset.get_train_test_ratios()
# ratios = np.ones_like(ratios) #TODO:uncomment this line and line 155, 156 to compute new ratios might be useful for JHMDB21
video_list = []
for vid, videoname in enumerate(sorted(allvideos)):
video_list.append(videoname)
actidx = dataset.get_video_act(videoname)
if actidx == 9999:
print("Annotations are not present so skipping video {}", videoname)
continue
if istrain:
step = ratios[actidx]
else:
step =1
numf = dataset.get_video_numf(videoname)
lastf = numf - 1
# if videoname not in trainvideos:
# istrain = False
# step = max(1, ratios[actidx]) * 1.5 # Taken from AMTNET
# if fulltest or self.image_set == 'val':
# step = 1
# lastf = numf
annotations = dataset.get_video_annotations(videoname)
tube_boxes, tube_labels = dataset.get_video_tubes(videoname)
possible_frame_nums = np.arange(0, numf - (dataset.K - 1), step) # [0, videolength-K]
if numf - step - 1 not in possible_frame_nums and not istrain:
possible_frame_nums = np.append(possible_frame_nums, numf - step - 1)
# print('numf',numf,possible_frame_nums[-1])
for frame_num in possible_frame_nums: # loop from start to last possible frame which can make a legit sequence
frame_num = np.int32(frame_num)
# Only for fulltest mode we will fix interval to a single range
if not fulltest:
interval = np.random.randint(1, dataset.interval+1)
else:
interval = dataset.interval
frame_range = dataset.get_frame_range(frame_num, interval)
# Invalid frame range
if frame_range[-1] >= numf:
continue
check_tubes = tube_labels[frame_range, :]
if np.any(np.sum(check_tubes > 0,
axis=0) > dataset.K - 1): # check if there aren't any semi overlapping tubes
sample_boxes = []
sample_labels = []
tube_Ids = []
image_name = dataset.get_frame_path(videoname, frame_num + 1)
if dataset.input_type == 'fusion':
for img_ in image_name:
assert os.path.isfile(img_), 'Image does not exist' + img_
else:
assert os.path.isfile(image_name), 'Image does not exist' + image_name
for tubeid, tube in enumerate(annotations):
# if tube_labels[frame_num, tubeid] > 0:
tubelet_box = np.asarray([tube_boxes[i][tubeid] for i in frame_range])
tubelet_label = np.asarray([tube_labels[i][tubeid] for i in frame_range])
tubelet_label = np.unique(tubelet_label)
# assert len(tubelet_label) == 1, 'Label for a tube should not change'
if len(tubelet_label) != 1: # Skip semi-overlapping tubes
continue
if tubelet_label == 0: # Skip negative detection
continue
sample_boxes.append(tubelet_box)
sample_labels.append(tubelet_label)
if istrain: # if it is training video
annotationlist.append(
[vid, frame_range, np.asarray(sample_labels) - 1, np.asarray(sample_boxes)])
for label in sample_labels:
action_counts[label - 1] += 1
elif dataset.image_set == 'val':
annotationlist.append(
[vid, frame_range, np.asarray(sample_labels) - 1, np.asarray(sample_boxes)]
)
for label in sample_labels:
action_counts[label - 1] += 1
elif dataset.image_set == 'test': # if test video and has micro-tubes with GT
annotationlist.append(
[vid, frame_range, np.asarray(sample_labels) - 1, np.asarray(sample_boxes)])
for label in sample_labels:
action_counts[label - 1] += 1
elif dataset.image_set == 'test' and not istrain: # if test video with no ground truth and fulltest is trues
annotationlist.append([vid, frame_range, np.asarray([9999]), np.zeros((1, 1, 4))])
for actidx, act_count in enumerate(
action_counts): # just to see the distribution of train and test sets
print('{:05d} action {:02d} {:s}'.format(act_count,
int(actidx),
dataset.CLASSES[actidx]))
# newratios = action_counts/1000
# print('new ratios', newratios)
# print('older ratios', ratios)
print(dataset.image_set, len(annotationlist))
return annotationlist, video_list
def __getitem__(self, index):
im, gt, img_index = self.pull_item(index)
return im, gt, img_index
def __len__(self):
return len(self.ids)
def get_frame_range(self, frame_num, interval=1):
'''
:param frame_num: Return extent of tubelet based on frame_num (0-indexed) and K
:return:
'''
return list(range(frame_num, frame_num + self.K * interval, interval))
def pull_item(self, index):
annot_info = self.ids[index]
frame_range = annot_info[1]
video_id = annot_info[0]
videoname = self.video_list[video_id]
step = 1
img_names = []
img_names += [self.get_frame_path(videoname, i+1) for i in frame_range]
if self.input_type == 'fusion':
img_names = [i for sublist in img_names for i in sublist]
        imgs = [cv2.imread(img_) for img_ in img_names]
        assert all(img is not None for img in imgs), 'Could not read image(s): ' + str(img_names)
        imgs = [img.astype(np.float32) for img in imgs]
height, width, channels = imgs[0].shape
target_tubelet = self.target_transform(annot_info[3], annot_info[2], width, height) # Normalizes boxes + Clip
boxes = np.asarray([t[0][:, :4] for t in target_tubelet])
labels = np.asarray([t[1] for t in target_tubelet])
imgs, boxes, labels = self.transform(imgs, boxes, labels)
imgs = [torch.from_numpy(img[:, :, (2, 1, 0)]).permute(2, 0, 1) for img in imgs]
targets = [boxes, labels]
return torch.stack(imgs, 0), targets, index
def pull_image_tubelet(self, index):
annot_info = self.ids[index]
frame_range= annot_info[1]
video_id = annot_info[0]
videoname = self.video_list[video_id]
step = 1
img_names = []
img_names += [self.get_frame_path(videoname, i+1) for i in frame_range]
# Flatten list if fusion mode
if self.input_type == 'fusion':
img_names = [i for sublist in img_names for i in sublist]
imgs = [cv2.imread(img_).astype(np.float32) for img_ in img_names]
return imgs
import re
def returnValSet(dataset, trainSet):
# Some groups are divided into clips ... To ensure generalization, remove all clips of one group
num_total_clips = len(trainSet)
trainset_seq_list = sorted(trainSet)
# Create groups - match till last number and then return chars preceding that
videos_minus_clip = sorted(set([re.match('(.*?)[0-9]+$', seq).group(1) for seq in trainSet]))
num_groups = len(videos_minus_clip)
# In-place shuffling of groups
valset_seq_list_minus_clip = []
trainset_seq_list_minus_clip = []
ratio=0.1
for cls_ in dataset.CLASSES:
clsVid = [a for a in videos_minus_clip if a.startswith(cls_+'/')]
random.Random(4).shuffle(clsVid)
numvalVideos = max(int(len(clsVid)*(ratio)), 1)
print(cls_, numvalVideos)
valset_seq_list_minus_clip += clsVid[:numvalVideos]
trainset_seq_list_minus_clip += clsVid[numvalVideos:]
# random.Random(4).shuffle(trainset_seq_list_minus_clip)
# ratio = 0.05
# Divide groups into train and val set
# valset_seq_list_minus_clip = trainset_seq_list_minus_clip[int(num_groups*(1-ratio)):]
# valset_seq_list_minus_clip = trainset_seq_list_minus_clip[::idx]
# trainset_seq_list_minus_clip = trainset_seq_list_minus_clip[:int(num_groups * (1 - ratio))]
# Get final list with all clips
# trainset_seq_list = []
# valset_seq_list = []
trainset_seq_list = sorted([a for group in trainset_seq_list_minus_clip for a in trainSet if group in a])
valset_seq_list = sorted([a for group in valset_seq_list_minus_clip for a in trainSet if group in a])
# valset_seq_list = sorted([seq for seq in trainset_seq_list if in valset_seq_list_minus_clip])
# trainset_seq_list = sorted([seq for seq in trainset_seq_list if seq[:idx] not in valset_seq_list_minus_clip])
assert num_total_clips == len(trainset_seq_list) + len(valset_seq_list)
assert len(set(trainset_seq_list).intersection(set(valset_seq_list))) == 0
return trainset_seq_list, valset_seq_list
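# --- Illustrative sketch (not part of the original file) ---
# Shows what the group regex in returnValSet does for a hypothetical
# UCF24-style clip name: it strips the trailing clip number, so every clip
# of the same group ends up on the same side of the train/val split.
def _demo_group_key(seq='Basketball/v_Basketball_g01_c01'):
    return re.match('(.*?)[0-9]+$', seq).group(1)  # -> 'Basketball/v_Basketball_g01_c'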
def CreateValLists(dataset):
trainSet, valSet = returnValSet(dataset, dataset.trainSet)
with open(dataset.root + 'splitfiles/90trainlist{:02d}.txt'.format(dataset.split), 'w') as f:
[f.write(ts+'\n') for ts in trainSet]
with open(dataset.root + 'splitfiles/90vallist{:02d}.txt'.format(dataset.split), 'w') as f:
[f.write(vs+'\n') for vs in valSet]
if __name__ == '__main__':
# # d = UCF24_Dataset(path='./ucf24/',split=1)
# set_ = 'train'
# ssd_dim = 300
# means = (104, 117, 123)
# stds = (0.225, 0.224, 0.229)
# e = Dataset_plus_Torch_Class.fromtorch(MOVEDetection_Tubelet, './move/', set_, SSDAugmentation(ssd_dim, means, stds),
# AnnotationTransform(), input_type='rgb', num_K=2, split=1)
# e.gtTubes(e.video_list[0])
# pass
# Done with 0.5 ratio
CreateValLists(UCF24_Dataset('', path='data/ucf24/', split=1, idx=-4))
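# --- Illustrative sketch (not part of the original file) ---
# Mirrors the get_frame_range method above without needing a dataset
# instance: a tubelet anchored at (0-indexed) frame 10 with K=3 frames and
# interval=2 covers frames [10, 12, 14]. The default values are made up.
def _demo_frame_range(frame_num=10, K=3, interval=2):
    return list(range(frame_num, frame_num + K * interval, interval))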
| 40.148148
| 133
| 0.597883
|
888ac6519b30b33c64840baa3577caca88e69b64
| 12,868
|
py
|
Python
|
google/cloud/texttospeech_v1/services/text_to_speech/transports/grpc_asyncio.py
|
LaudateCorpus1/python-texttospeech
|
bc5b73fbc62900f89a01486c6e8d42d459c34fd6
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/texttospeech_v1/services/text_to_speech/transports/grpc_asyncio.py
|
LaudateCorpus1/python-texttospeech
|
bc5b73fbc62900f89a01486c6e8d42d459c34fd6
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/texttospeech_v1/services/text_to_speech/transports/grpc_asyncio.py
|
LaudateCorpus1/python-texttospeech
|
bc5b73fbc62900f89a01486c6e8d42d459c34fd6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.texttospeech_v1.types import cloud_tts
from .base import TextToSpeechTransport, DEFAULT_CLIENT_INFO
from .grpc import TextToSpeechGrpcTransport
class TextToSpeechGrpcAsyncIOTransport(TextToSpeechTransport):
"""gRPC AsyncIO backend transport for TextToSpeech.
Service that implements Google Cloud Text-to-Speech API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "texttospeech.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "texttospeech.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_voices(
self,
) -> Callable[
[cloud_tts.ListVoicesRequest], Awaitable[cloud_tts.ListVoicesResponse]
]:
r"""Return a callable for the list voices method over gRPC.
Returns a list of Voice supported for synthesis.
Returns:
Callable[[~.ListVoicesRequest],
Awaitable[~.ListVoicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_voices" not in self._stubs:
self._stubs["list_voices"] = self.grpc_channel.unary_unary(
"/google.cloud.texttospeech.v1.TextToSpeech/ListVoices",
request_serializer=cloud_tts.ListVoicesRequest.serialize,
response_deserializer=cloud_tts.ListVoicesResponse.deserialize,
)
return self._stubs["list_voices"]
@property
def synthesize_speech(
self,
) -> Callable[
[cloud_tts.SynthesizeSpeechRequest],
Awaitable[cloud_tts.SynthesizeSpeechResponse],
]:
r"""Return a callable for the synthesize speech method over gRPC.
Synthesizes speech synchronously: receive results
after all text input has been processed.
Returns:
Callable[[~.SynthesizeSpeechRequest],
Awaitable[~.SynthesizeSpeechResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "synthesize_speech" not in self._stubs:
self._stubs["synthesize_speech"] = self.grpc_channel.unary_unary(
"/google.cloud.texttospeech.v1.TextToSpeech/SynthesizeSpeech",
request_serializer=cloud_tts.SynthesizeSpeechRequest.serialize,
response_deserializer=cloud_tts.SynthesizeSpeechResponse.deserialize,
)
return self._stubs["synthesize_speech"]
def close(self):
return self.grpc_channel.close()
__all__ = ("TextToSpeechGrpcAsyncIOTransport",)
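# --- Illustrative usage sketch (not part of the generated file above) ---
# The public async client normally constructs this transport for you when it
# is requested by the name "grpc_asyncio". Running the sketch requires
# Application Default Credentials; the client import below is an assumption
# about the public package layout, not something defined in this module.
if __name__ == "__main__":
    import asyncio

    from google.cloud import texttospeech_v1

    async def _demo_list_voices():
        client = texttospeech_v1.TextToSpeechAsyncClient(transport="grpc_asyncio")
        response = await client.list_voices()
        print("voices available:", len(response.voices))

    asyncio.run(_demo_list_voices())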
| 43.918089
| 87
| 0.637784
|
f52a82fdee3a90e59eba0e27df83e041cf454e95
| 5,953
|
py
|
Python
|
api/tacticalrmm/core/models.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/core/models.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/core/models.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | null | null | null |
from loguru import logger
import pytz
import os
import time
import smtplib
from email.message import EmailMessage
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.postgres.fields import ArrayField
from django.conf import settings
logger.configure(**settings.LOG_CONFIG)
TZ_CHOICES = [(_, _) for _ in pytz.all_timezones]
class CoreSettings(models.Model):
email_alert_recipients = ArrayField(
models.EmailField(null=True, blank=True), null=True, blank=True, default=list,
)
smtp_from_email = models.CharField(
max_length=255, null=True, blank=True, default="from@example.com"
)
smtp_host = models.CharField(
max_length=255, null=True, blank=True, default="smtp.gmail.com"
)
smtp_host_user = models.CharField(
max_length=255, null=True, blank=True, default="admin@example.com"
)
smtp_host_password = models.CharField(
max_length=255, null=True, blank=True, default="changeme"
)
smtp_port = models.PositiveIntegerField(default=587, null=True, blank=True)
smtp_requires_auth = models.BooleanField(default=True)
default_time_zone = models.CharField(
max_length=255, choices=TZ_CHOICES, default="America/Los_Angeles"
)
mesh_token = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_username = models.CharField(max_length=255, null=True, blank=True, default="")
mesh_site = models.CharField(max_length=255, null=True, blank=True, default="")
def save(self, *args, **kwargs):
if not self.pk and CoreSettings.objects.exists():
raise ValidationError("There can only be one CoreSettings instance")
# Only runs on first create
if not self.pk:
mesh_settings = self.get_initial_mesh_settings()
if "mesh_token" in mesh_settings:
self.mesh_token = mesh_settings["mesh_token"]
if "mesh_username" in mesh_settings:
self.mesh_username = mesh_settings["mesh_username"]
if "mesh_site" in mesh_settings:
self.mesh_site = mesh_settings["mesh_site"]
return super(CoreSettings, self).save(*args, **kwargs)
def __str__(self):
return "Global Site Settings"
@property
def email_is_configured(self):
# smtp with username/password authentication
if (
self.smtp_requires_auth
and self.email_alert_recipients
and self.smtp_from_email
and self.smtp_host
and self.smtp_host_user
and self.smtp_host_password
and self.smtp_port
):
return True
# smtp relay
elif (
not self.smtp_requires_auth
and self.email_alert_recipients
and self.smtp_from_email
and self.smtp_host
and self.smtp_port
):
return True
else:
return False
def send_mail(self, subject, body, test=False):
if not self.email_is_configured:
if test:
return "Missing required fields (need at least 1 recipient)"
return False
try:
msg = EmailMessage()
msg["Subject"] = subject
msg["From"] = self.smtp_from_email
msg["To"] = ", ".join(self.email_alert_recipients)
msg.set_content(body)
with smtplib.SMTP(self.smtp_host, self.smtp_port, timeout=20) as server:
if self.smtp_requires_auth:
server.ehlo()
server.starttls()
server.login(self.smtp_host_user, self.smtp_host_password)
server.send_message(msg)
server.quit()
else:
# smtp relay. no auth required
server.send_message(msg)
server.quit()
except Exception as e:
logger.error(f"Sending email failed with error: {e}")
if test:
return str(e)
else:
return True
def get_initial_mesh_settings(self):
mesh_settings = {}
# Check for Mesh Username
try:
if settings.MESH_USERNAME:
mesh_settings["mesh_username"] = settings.MESH_USERNAME
else:
raise AttributeError("MESH_USERNAME doesn't exist")
except AttributeError:
pass
# Check for Mesh Site
try:
if settings.MESH_SITE:
mesh_settings["mesh_site"] = settings.MESH_SITE
else:
raise AttributeError("MESH_SITE doesn't exist")
except AttributeError:
pass
# Check for Mesh Token
try:
if settings.MESH_TOKEN_KEY:
mesh_settings["mesh_token"] = settings.MESH_TOKEN_KEY
else:
raise AttributeError("MESH_TOKEN_KEY doesn't exist")
except AttributeError:
filepath = "/token/token.key"
counter = 0
while counter < 12:
try:
with open(filepath, "r") as read_file:
key = read_file.readlines()
# Remove key file contents for security reasons
with open(filepath, "w") as write_file:
write_file.write("")
# readlines() returns an array. Get first item
mesh_settings["mesh_token"] = key[0].rstrip()
break
except IOError:
pass
counter = counter + 1
time.sleep(10)
return mesh_settings
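# --- Illustrative sketch (not part of the original module) ---
# How the singleton settings row is typically exercised to fire a test email.
# Assumes Django is configured and a CoreSettings row already exists; the
# helper name below is made up for illustration.
def _demo_send_test_email():
    core = CoreSettings.objects.first()
    if core is None or not core.email_is_configured:
        return "Email is not configured"
    return core.send_mail("Test alert", "Hello from Tactical RMM", test=True)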
| 34.812865
| 88
| 0.567109
|
83caa36f3ea0a42adc64817465b46aaf8394d8b9
| 10,357
|
py
|
Python
|
openapi/preprocess_spec.py
|
spiffxp/gen
|
a867e601330b0b3dcfe37d37e74ddc00e1548636
|
[
"Apache-2.0"
] | null | null | null |
openapi/preprocess_spec.py
|
spiffxp/gen
|
a867e601330b0b3dcfe37d37e74ddc00e1548636
|
[
"Apache-2.0"
] | null | null | null |
openapi/preprocess_spec.py
|
spiffxp/gen
|
a867e601330b0b3dcfe37d37e74ddc00e1548636
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import operator
import os.path
import sys
from collections import OrderedDict
import urllib3
# these four constants are shown as part of this example in []:
# "[watch]Pod[List]" is the deprecated version of "[list]Pod?[watch]=True"
WATCH_OP_PREFIX = "watch"
WATCH_OP_SUFFIX = "List"
LIST_OP_PREFIX = "list"
WATCH_QUERY_PARAM_NAME = "watch"
CUSTOM_OBJECTS_SPEC_PATH = os.path.join(
os.path.dirname(__file__),
'custom_objects_spec.json')
_ops = ['get', 'put', 'post', 'delete', 'options', 'head', 'patch']
class PreprocessingException(Exception):
pass
def _title(s):
if len(s) == 0:
return s
return s[0].upper() + s[1:]
def _to_camel_case(s):
return ''.join(_title(y) for y in s.split("_"))
def apply_func_to_spec_operations(spec, func, *params):
"""Apply func to each operation in the spec.
:param spec: The OpenAPI spec to apply func to.
:param func: the function to apply to the spec's operations. It should be
a func(operation, parent) where operation will be each
operation of the spec and parent would be the parent object of
the given operation.
If the return value of the func is True, then the operation
will be deleted from the spec.
"""
for k, v in spec['paths'].items():
for op in _ops:
if op not in v:
continue
if func(v[op], v, *params):
del v[op]
def _has_property(prop_list, property_name):
for prop in prop_list:
if prop["name"] == property_name:
return True
def remove_watch_operations(op, parent, operation_ids):
op_id = op['operationId']
if not op_id.startswith(WATCH_OP_PREFIX):
return
list_id = (LIST_OP_PREFIX +
op_id.replace(WATCH_OP_SUFFIX, "")[len(WATCH_OP_PREFIX):])
if list_id not in operation_ids:
raise PreprocessingException("Cannot find %s" % list_id)
list_op = operation_ids[list_id]
params = []
if 'parameters' in list_op:
params += list_op['parameters']
if 'parameters' in parent:
params += parent['parameters']
if not _has_property(params, WATCH_QUERY_PARAM_NAME):
raise PreprocessingException("%s has no watch query param" % list_id)
return True
def strip_tags_from_operation_id(operation, _):
operation_id = operation['operationId']
if 'tags' in operation:
for t in operation['tags']:
operation_id = operation_id.replace(_to_camel_case(t), '')
operation['operationId'] = operation_id
def add_custom_objects_spec(spec):
with open(CUSTOM_OBJECTS_SPEC_PATH, 'r') as custom_objects_spec_file:
custom_objects_spec = json.loads(custom_objects_spec_file.read())
for path in custom_objects_spec.keys():
if path not in spec['paths'].keys():
spec['paths'][path] = custom_objects_spec[path]
return spec
def process_swagger(spec, client_language):
spec = add_custom_objects_spec(spec)
apply_func_to_spec_operations(spec, strip_tags_from_operation_id)
operation_ids = {}
apply_func_to_spec_operations(spec, lambda op, _: operator.setitem(
operation_ids, op['operationId'], op))
try:
apply_func_to_spec_operations(
spec, remove_watch_operations, operation_ids)
except PreprocessingException as e:
print(e)
remove_model_prefixes(spec)
inline_primitive_models(spec, preserved_primitives_for_language(client_language))
return spec
def preserved_primitives_for_language(client_language):
if client_language == "java":
return ["intstr.IntOrString", "resource.Quantity"]
elif client_language == "csharp":
return ["intstr.IntOrString", "resource.Quantity"]
else:
return []
def rename_model(spec, old_name, new_name):
if new_name in spec['definitions']:
raise PreprocessingException(
"Cannot rename model %s. new name %s exists." %
(old_name, new_name))
find_rename_ref_recursive(spec,
"#/definitions/" + old_name,
"#/definitions/" + new_name)
spec['definitions'][new_name] = spec['definitions'][old_name]
del spec['definitions'][old_name]
def find_rename_ref_recursive(root, old, new):
if isinstance(root, list):
for r in root:
find_rename_ref_recursive(r, old, new)
if isinstance(root, dict):
if "$ref" in root:
if root["$ref"] == old:
root["$ref"] = new
for k, v in root.items():
find_rename_ref_recursive(v, old, new)
def is_model_deprecated(m):
"""
    Check if a model is a deprecated model redirection.
    A deprecated model redirection has only two members and a
    description that starts with the string "Deprecated.".
"""
if len(m) != 2:
return False
if "$ref" not in m or "description" not in m:
return False
return m["description"].startswith("Deprecated.")
def remove_deprecated_models(spec):
"""
    In Kubernetes 1.8 some of the models were renamed. remove_model_prefixes
    still produces the same model names, but the spec also gained redirection
    models that map old model names to the new ones. These redirection models
    break remove_model_prefixes and need to be removed.
"""
models = {}
for k, v in spec['definitions'].items():
if is_model_deprecated(v):
print("Removing deprecated model %s" % k)
else:
models[k] = v
spec['definitions'] = models
def remove_model_prefixes(spec):
"""Remove full package name from OpenAPI model names.
    Starting with Kubernetes 1.6, all models carry their full package name. This
    is verbose and inconvenient in the Python client. This function removes parts
    of the package name while making sure there are no conflicting model names.
    It keeps most of the model names generated by the previous client but
    changes some of them.
"""
remove_deprecated_models(spec)
models = {}
for k, v in spec['definitions'].items():
if k.startswith("io.k8s"):
models[k] = {"split_n": 2}
conflict = True
while conflict:
for k, v in models.items():
splits = k.rsplit(".", v["split_n"])
v["removed_prefix"] = splits.pop(0)
v["new_name"] = ".".join(splits)
conflict = False
for k, v in models.items():
for k2, v2 in models.items():
if k != k2 and v["new_name"] == v2["new_name"]:
v["conflict"] = True
v2["conflict"] = True
conflict = True
if conflict:
for k, v in models.items():
if "conflict" in v:
print("Resolving conflict for %s" % k)
v["split_n"] += 1
del v["conflict"]
for k, v in models.items():
if "new_name" not in v:
raise PreprocessingException("Cannot rename model %s" % k)
print("Removing prefix %s from %s...\n" % (v["removed_prefix"], k))
rename_model(spec, k, v["new_name"])
def find_replace_ref_recursive(root, ref_name, replace_map):
if isinstance(root, list):
for r in root:
find_replace_ref_recursive(r, ref_name, replace_map)
if isinstance(root, dict):
if "$ref" in root:
if root["$ref"] == ref_name:
del root["$ref"]
for k, v in replace_map.items():
if k in root:
if k != "description":
raise PreprocessingException(
"Cannot inline model %s because of "
"conflicting key %s." % (ref_name, k))
continue
root[k] = v
for k, v in root.items():
find_replace_ref_recursive(v, ref_name, replace_map)
def inline_primitive_models(spec, excluded_primitives):
to_remove_models = []
for k, v in spec['definitions'].items():
if k in excluded_primitives:
continue
if "properties" not in v:
if k == "intstr.IntOrString":
v["type"] = "object"
if "type" not in v:
v["type"] = "object"
print("Making model `%s` inline as %s..." % (k, v["type"]))
find_replace_ref_recursive(spec, "#/definitions/" + k, v)
to_remove_models.append(k)
for k in to_remove_models:
del spec['definitions'][k]
def write_json(filename, object):
with open(filename, 'w') as out:
json.dump(object, out, sort_keys=False, indent=2, separators=(',', ': '), ensure_ascii=True)
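# --- Illustrative sketch (not part of the original script) ---
# What strip_tags_from_operation_id does to a single made-up operation: the
# CamelCased tag "core_v1" -> "CoreV1" is removed from the operationId.
def _demo_strip_tags():
    op = {"operationId": "listCoreV1NamespacedPod", "tags": ["core_v1"]}
    strip_tags_from_operation_id(op, None)
    assert op["operationId"] == "listNamespacedPod"
    return op["operationId"]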
def main():
if len(sys.argv) != 4:
print("Usage:\n\n\tpython preprocess_spec.py client_language kubernetes_branch " \
"output_spec_path")
return 1
client_language = sys.argv[1]
spec_url = 'https://raw.githubusercontent.com/kubernetes/kubernetes/' \
'%s/api/openapi-spec/swagger.json' % sys.argv[2]
output_path = sys.argv[3]
pool = urllib3.PoolManager()
with pool.request('GET', spec_url, preload_content=False) as response:
if response.status != 200:
print("Error downloading spec file. Reason: %s" % response.reason)
return 1
in_spec = json.load(response, object_pairs_hook=OrderedDict)
write_json(output_path + ".unprocessed", in_spec)
out_spec = process_swagger(in_spec, client_language)
write_json(output_path, out_spec)
return 0
if __name__ == '__main__':
sys.exit(main())
| 33.517799
| 100
| 0.622478
|
4247e4cb6b1ee6bf1c515f60fb97503dd6b3a7ee
| 879
|
py
|
Python
|
MyDailyNews/src/MyDailyNews/urls.py
|
Jackal007/MyDailyNews
|
964a55abf4b4d1ca7a228a641b9dbd7c6b44e192
|
[
"Unlicense"
] | 2
|
2017-11-21T06:39:42.000Z
|
2020-05-05T08:40:07.000Z
|
MyDailyNews/src/MyDailyNews/urls.py
|
Jackal007/MyDailyNews
|
964a55abf4b4d1ca7a228a641b9dbd7c6b44e192
|
[
"Unlicense"
] | null | null | null |
MyDailyNews/src/MyDailyNews/urls.py
|
Jackal007/MyDailyNews
|
964a55abf4b4d1ca7a228a641b9dbd7c6b44e192
|
[
"Unlicense"
] | null | null | null |
"""MyDailyNews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^dataGetter/', include(('dataGetter.urls', 'dataGetter'), 'dataGetter')),
]
| 36.625
| 83
| 0.709898
|
1929ecea8a05ecaabc8e5702556884cbbfcd8165
| 31,187
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/aio/operations/_virtual_machine_scale_set_vm_run_commands_operations.py
|
benbp/azure-sdk-for-python
|
2329ba03e48098dcdc581898f6434d7c2b13a7b9
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/aio/operations/_virtual_machine_scale_set_vm_run_commands_operations.py
|
benbp/azure-sdk-for-python
|
2329ba03e48098dcdc581898f6434d7c2b13a7b9
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/aio/operations/_virtual_machine_scale_set_vm_run_commands_operations.py
|
benbp/azure-sdk-for-python
|
2329ba03e48098dcdc581898f6434d7c2b13a7b9
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetVMRunCommandsOperations:
"""VirtualMachineScaleSetVMRunCommandsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommand",
**kwargs
) -> "_models.VirtualMachineRunCommand":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(run_command, 'VirtualMachineRunCommand')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommand",
**kwargs
) -> AsyncLROPoller["_models.VirtualMachineRunCommand"]:
"""The operation to create or update the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:param run_command: Parameters supplied to the Create Virtual Machine RunCommand operation.
:type run_command: ~azure.mgmt.compute.v2021_03_01.models.VirtualMachineRunCommand
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineRunCommand or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineRunCommand]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
run_command=run_command,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommandUpdate",
**kwargs
) -> "_models.VirtualMachineRunCommand":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(run_command, 'VirtualMachineRunCommandUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
run_command: "_models.VirtualMachineRunCommandUpdate",
**kwargs
) -> AsyncLROPoller["_models.VirtualMachineRunCommand"]:
"""The operation to update the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:param run_command: Parameters supplied to the Update Virtual Machine RunCommand operation.
:type run_command: ~azure.mgmt.compute.v2021_03_01.models.VirtualMachineRunCommandUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineRunCommand or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineRunCommand]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
run_command=run_command,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json, text/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""The operation to delete the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
run_command_name=run_command_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
async def get(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
run_command_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.VirtualMachineRunCommand":
"""The operation to get the VMSS VM run command.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param run_command_name: The name of the virtual machine run command.
:type run_command_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineRunCommand, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.VirtualMachineRunCommand
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommand"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'runCommandName': self._serialize.url("run_command_name", run_command_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineRunCommand', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}'} # type: ignore
def list(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
expand: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.VirtualMachineRunCommandsListResult"]:
"""The operation to get all run commands of an instance in Virtual Machine Scaleset.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineRunCommandsListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineRunCommandsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineRunCommandsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineRunCommandsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands'} # type: ignore
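# --- Illustrative usage sketch (not part of the generated file above) ---
# These operations are normally reached through the async management client
# rather than by instantiating this class directly (see the class docstring).
# The client/credential classes and all resource names below are assumptions
# made for illustration only:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.compute.aio import ComputeManagementClient
#
#     async def demo(subscription_id):
#         async with ComputeManagementClient(DefaultAzureCredential(), subscription_id) as client:
#             poller = await client.virtual_machine_scale_set_vm_run_commands.begin_delete(
#                 resource_group_name="my-rg",
#                 vm_scale_set_name="my-vmss",
#                 instance_id="0",
#                 run_command_name="my-run-command",
#             )
#             await poller.result()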
| 51.891847
| 261
| 0.676147
|
db43e36e86ec171237b698bfd3f10aceb5859e31
| 1,243
|
py
|
Python
|
dictionaries/data_handler.py
|
mwhittemore2/vocab_manager
|
05d8ec2bac925a53e1882c645f5e086a540bfe6b
|
[
"MIT"
] | null | null | null |
dictionaries/data_handler.py
|
mwhittemore2/vocab_manager
|
05d8ec2bac925a53e1882c645f5e086a540bfe6b
|
[
"MIT"
] | null | null | null |
dictionaries/data_handler.py
|
mwhittemore2/vocab_manager
|
05d8ec2bac925a53e1882c645f5e086a540bfe6b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class DataHandler(ABC):
"""
Base class for transforming a dictionary entry
to its final form.
"""
def __init__(self, next_handler):
"""
Initializes the dictionary entry transformer.
Parameters
----------
next_handler : DataHandler
The next transformer to try
"""
self.set_handler(next_handler)
def get_handler(self):
"""
Fetches the next transformer if the current one
is not relevant.
Returns
-------
DataHandler
The next data transformer
"""
return self.next_handler
@abstractmethod
def process_entry(self, entry):
"""
Transforms the dictionary entry as specified
by the derived class.
Parameters
----------
entry : dict
An entry for the foreign language dictionary
"""
pass
def set_handler(self, next_handler):
"""
Saves the next transformer to try.
Parameters
----------
next_handler : DataHandler
The next data transformer to try
"""
self.next_handler = next_handler
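# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete handler showing how the chain is meant to be used: each
# handler transforms the entry if it is relevant, otherwise it defers to the
# next handler. The field name "headword" is made up for the example.
class LowercaseHeadword(DataHandler):
    """Lowercases a hypothetical 'headword' field, else defers to the next handler."""
    def process_entry(self, entry):
        if "headword" in entry:
            entry["headword"] = entry["headword"].lower()
            return entry
        next_handler = self.get_handler()
        if next_handler is not None:
            return next_handler.process_entry(entry)
        return entry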
| 23.45283
| 56
| 0.5535
|
8ed3e03ab27e865a4e8a6f076efc6d94b735c00e
| 362
|
py
|
Python
|
week_02/inclass/tests/q2_2.py
|
ds-connectors/EPS-88-FA20
|
2ef31ff28e8f26e50929b77b72ee035913b06e5f
|
[
"BSD-3-Clause"
] | 1
|
2021-01-18T07:08:53.000Z
|
2021-01-18T07:08:53.000Z
|
week_02/inclass/tests/q2_2.py
|
ds-connectors/EPS-88-FA20
|
2ef31ff28e8f26e50929b77b72ee035913b06e5f
|
[
"BSD-3-Clause"
] | null | null | null |
week_02/inclass/tests/q2_2.py
|
ds-connectors/EPS-88-FA20
|
2ef31ff28e8f26e50929b77b72ee035913b06e5f
|
[
"BSD-3-Clause"
] | 2
|
2020-12-04T11:05:14.000Z
|
2021-01-12T10:01:26.000Z
|
test = {
'name': 'Question 2_2',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> 2653.75<topo_sigma<2653.85
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| 15.73913
| 40
| 0.342541
|
64054d1b2fe525851da16ccd9f5f410afdfe767a
| 2,909
|
py
|
Python
|
dqn_zoo/shaping.py
|
seblee97/dqn_zoo
|
d5860441aa95e65db6c74c1be3ac8a4999e29ee2
|
[
"Apache-2.0"
] | null | null | null |
dqn_zoo/shaping.py
|
seblee97/dqn_zoo
|
d5860441aa95e65db6c74c1be3ac8a4999e29ee2
|
[
"Apache-2.0"
] | null | null | null |
dqn_zoo/shaping.py
|
seblee97/dqn_zoo
|
d5860441aa95e65db6c74c1be3ac8a4999e29ee2
|
[
"Apache-2.0"
] | 1
|
2021-05-15T15:37:19.000Z
|
2021-05-15T15:37:19.000Z
|
import jax.numpy as jnp
import jax
class NoPenalty:
"""No penalty placeholder."""
def __call__(self, target_q_values, transitions, rng_key):
return transitions.r_t
class HardCodedPenalty:
"""Hard coded constant penalty,
i.e. F(s, a, s') = k where k is constant.
"""
def __init__(self, penalty: float):
self._penalty = penalty
def __call__(self, target_q_values, transitions, rng_key):
penalty_terms = self._penalty * jnp.ones_like(transitions.r_t)
return transitions.r_t + penalty_terms
class UncertaintyPenalty:
"""Adaptive penalty based on uncertainty in state-action values over ensemble."""
def __init__(self, multiplicative_factor: float):
self._multiplicative_factor = multiplicative_factor
def __call__(self, target_q_values, transitions, rng_key):
state_action_values = target_q_values[jnp.arange(len(target_q_values)), :, transitions.a_tm1]
penalty_terms = self._multiplicative_factor * jnp.std(state_action_values, axis=1)
return transitions.r_t + penalty_terms
class PolicyEntropyPenalty:
"""Adaptive penalty based on policy entropy of ensemble."""
def __init__(self, multiplicative_factor: float, num_actions: int):
self._multiplicative_factor = multiplicative_factor
LOG_EPSILON = 0.0001
def compute_entropy(max_indices):
max_index_probabilities = jnp.bincount(max_indices, minlength=num_actions, length=num_actions) / len(max_indices)
entropy = -jnp.sum((max_index_probabilities + LOG_EPSILON) * jnp.log(max_index_probabilities + LOG_EPSILON))
return entropy
self._compute_entropy = jax.vmap(compute_entropy, in_axes=(0))
def __call__(self, target_q_values, transitions, rng_key):
max_indices = jnp.argmax(target_q_values, axis=-1)
penalty_terms = self._multiplicative_factor * self._compute_entropy(max_indices)
return transitions.r_t + penalty_terms
class MunchausenPenalty:
"""Adaptive penalty that adds scaled log policy to the reward.
Based on Munchausen RL: https://arxiv.org/pdf/2007.14430.pdf
"""
def __init__(self, multiplicative_factor: float, num_actions: int):
self._multiplicative_factor = multiplicative_factor
LOG_EPSILON = 0.0001
def compute_log_policy(max_indices):
max_index_probabilities = jnp.bincount(max_indices, minlength=num_actions, length=num_actions) / len(max_indices)
log_policy = jnp.log(max_index_probabilities + LOG_EPSILON)
return log_policy
self._compute_log_policy = jax.vmap(compute_log_policy, in_axes=(0))
def __call__(self, target_q_values, transitions, rng_key):
max_indices = jnp.argmax(target_q_values, axis=-1)
log_policy = self._compute_log_policy(max_indices)
action_log_policy = log_policy[jnp.arange(len(log_policy)), transitions.a_tm1]
penalty_terms = self._multiplicative_factor * action_log_policy
return transitions.r_t + penalty_terms
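# Added sketch (not part of the original module): the penalties above only need
# `transitions.r_t` (and some also `transitions.a_tm1`), so a small namedtuple can
# stand in for the real transition batch when experimenting.
import collections

def _shaping_demo():
    # Minimal stand-in for the real transition batch; field names follow the usage above.
    Transitions = collections.namedtuple("Transitions", ["r_t", "a_tm1"])
    transitions = Transitions(r_t=jnp.ones(4), a_tm1=jnp.zeros(4, dtype=jnp.int32))
    # Constant shaping: every reward is shifted by the fixed penalty k = -0.1.
    return HardCodedPenalty(penalty=-0.1)(None, transitions, None)  # -> [0.9, 0.9, 0.9, 0.9]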
| 35.91358
| 119
| 0.756961
|
b219cc3fdc6a333c40f422228f571517a448a233
| 3,557
|
py
|
Python
|
autogluon/utils/try_import.py
|
brc7/autogluon
|
423ea14694a4db849fb838903d28329888133ea2
|
[
"Apache-2.0"
] | 1
|
2020-08-20T08:30:15.000Z
|
2020-08-20T08:30:15.000Z
|
autogluon/utils/try_import.py
|
brc7/autogluon
|
423ea14694a4db849fb838903d28329888133ea2
|
[
"Apache-2.0"
] | null | null | null |
autogluon/utils/try_import.py
|
brc7/autogluon
|
423ea14694a4db849fb838903d28329888133ea2
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['try_import_catboost', 'try_import_lightgbm', 'try_import_mxboard', 'try_import_mxnet',
'try_import_cv2', 'try_import_gluonnlp']
def try_import_catboost():
try:
import catboost
except ValueError as e:
raise ImportError("Import catboost failed. Numpy version may be outdated, "
"Please ensure numpy version >=1.16.0. If it is not, please try 'pip uninstall numpy; pip install numpy>=1.17.0' Detailed info: {}".format(str(e)))
def try_import_catboostdev(): # TODO: remove once Catboost 0.24 is released.
try:
import catboost # Need to first import catboost before catboost_dev and not vice-versa
import catboost_dev
except (ValueError, ImportError) as e:
raise ImportError("Import catboost_dev failed (needed for distillation with CatBoost models). "
"Make sure you can import catboost and then run: 'pip install catboost-dev'."
"Detailed info: {}".format(str(e)))
def try_import_lightgbm():
try:
import lightgbm
except OSError as e:
raise ImportError("Import lightgbm failed. If you are using Mac OSX, "
"Please try 'brew install libomp'. Detailed info: {}".format(str(e)))
def try_import_mxboard():
try:
import mxboard
except ImportError:
raise ImportError(
"Unable to import dependency mxboard. "
"A quick tip is to install via `pip install mxboard`. ")
def try_import_mxnet():
mx_version = '1.4.1'
try:
import mxnet as mx
from distutils.version import LooseVersion
if LooseVersion(mx.__version__) < LooseVersion(mx_version):
msg = (
"Legacy mxnet-mkl=={} detected, some new modules may not work properly. "
"mxnet-mkl>={} is required. You can use pip to upgrade mxnet "
"`pip install mxnet-mkl --pre --upgrade` "
"or `pip install mxnet-cu90mkl --pre --upgrade`").format(mx.__version__, mx_version)
raise ImportError(msg)
except ImportError:
raise ImportError(
"Unable to import dependency mxnet. "
"A quick tip is to install via `pip install mxnet-mkl/mxnet-cu90mkl --pre`. ")
def try_import_cv2():
try:
import cv2
except ImportError:
raise ImportError(
"Unable to import dependency cv2. "
"A quick tip is to install via `pip install opencv-python`. ")
def try_import_gluonnlp():
try:
import gluonnlp
# TODO After 1.0 is supported,
# we will remove the checking here and use gluonnlp.utils.check_version instead.
from pkg_resources import parse_version # pylint: disable=import-outside-toplevel
gluonnlp_version = parse_version(gluonnlp.__version__)
assert gluonnlp_version >= parse_version('0.8.1') and\
gluonnlp_version <= parse_version('0.8.3'), \
'Currently, we only support 0.8.1<=gluonnlp<=0.8.3'
except ImportError:
raise ImportError(
"Unable to import dependency gluonnlp. The NLP model won't be available "
"without installing gluonnlp. "
"A quick tip is to install via `pip install gluonnlp==0.8.1`. ")
return gluonnlp
def try_import_faiss():
try:
import faiss
except ImportError:
raise ImportError(
"Unable to import dependency faiss"
"A quick tip is to install via `pip install faiss-cpu`. ")
| 41.847059
| 173
| 0.630025
|
fe2f68f35f08c8e5921f906119928b989c0fe428
| 32,105
|
py
|
Python
|
models/vfs.py
|
wavemind/gcb17ml
|
350d0a82e5d97dd690b7c17e26f11b58d9c9e9ac
|
[
"Apache-2.0"
] | null | null | null |
models/vfs.py
|
wavemind/gcb17ml
|
350d0a82e5d97dd690b7c17e26f11b58d9c9e9ac
|
[
"Apache-2.0"
] | null | null | null |
models/vfs.py
|
wavemind/gcb17ml
|
350d0a82e5d97dd690b7c17e26f11b58d9c9e9ac
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual file system for managing files locally or in the cloud."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
import sys
import threading
import unittest
from config import ConfigProperty
from counters import PerfCounter
from entities import BaseEntity
import jinja2
from common import caching
from common import jinja_utils
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# all caches must have limits
MAX_GLOBAL_CACHE_SIZE_BYTES = 16 * 1024 * 1024
# max size of each item; no point in storing images for example
MAX_GLOBAL_CACHE_ITEM_SIZE_BYTES = 256 * 1024
# The maximum size allowed in a file. The datastore limit of 1Mb with a fudge
# factor shaved off.
MAX_FILE_SIZE = 1024 * 1024 - 10 * 1024
# Global memcache controls.
CAN_USE_VFS_IN_PROCESS_CACHE = ConfigProperty(
'gcb_can_use_vfs_in_process_cache', bool, (
'Whether or not to cache content objects. For production this value '
'should be on to enable maximum performance. For development this '
'value should be off so you can see your changes to course content '
'instantaneously.'), default_value=True)
class AbstractFileSystem(object):
"""A generic file system interface that forwards to an implementation."""
def __init__(self, impl):
self._impl = impl
self._readonly = False
@property
def impl(self):
return self._impl
@classmethod
def normpath(cls, path):
"""Make Windows and Linux filenames to have the same separator '/'."""
# Replace '\' into '/' and force Unicode.
if not path:
return path
return u'' + path.replace('\\', '/')
def begin_readonly(self):
"""Activates caching of resources and prevents mutations."""
self._assert_not_readonly()
self._readonly = True
def end_readonly(self):
"""Deactivates caching of resources and enables mutations."""
if not self._readonly:
raise Exception('Not readonly.')
self._readonly = False
@property
def is_readonly(self):
return self._readonly
def _assert_not_readonly(self):
if self._readonly:
raise Exception(
'Unable to execute requested operation while readonly.')
def isfile(self, filename):
"""Checks if file exists, similar to os.path.isfile(...)."""
return self._impl.isfile(filename)
def open(self, filename):
"""Returns a stream with the file content, similar to open(...)."""
return self._impl.get(filename)
def get(self, filename):
"""Returns bytes with the file content, but no metadata."""
return self.open(filename).read()
def put(self, filename, stream, **kwargs):
"""Replaces the contents of the file with the bytes in the stream."""
self._assert_not_readonly()
self._impl.put(filename, stream, **kwargs)
def delete(self, filename):
"""Deletes a file and metadata associated with it."""
self._assert_not_readonly()
self._impl.delete(filename)
def list(self, dir_name, include_inherited=False):
"""Lists all files in a directory."""
return self._impl.list(dir_name, include_inherited)
def get_jinja_environ(self, dir_names, autoescape=True):
"""Configures jinja environment loaders for this file system."""
return self._impl.get_jinja_environ(dir_names, autoescape=autoescape)
def is_read_write(self):
return self._impl.is_read_write()
def is_draft(self, stream):
if not hasattr(stream, 'metadata'):
return False
if not stream.metadata:
return False
return stream.metadata.is_draft
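# Added sketch (not part of the original module): begin_readonly()/end_readonly() above
# are meant to bracket a read-heavy section so cached resources are used and accidental
# writes raise. `vfs` is assumed to be an AbstractFileSystem wrapping any implementation.
def render_readonly(vfs, filename):
    vfs.begin_readonly()
    try:
        return vfs.get(filename)
    finally:
        vfs.end_readonly()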
class LocalReadOnlyFileSystem(object):
"""A read-only file system serving only local files."""
def __init__(self, logical_home_folder=None, physical_home_folder=None):
"""Creates a new instance of the disk-backed read-only file system.
Args:
logical_home_folder: A logical home dir of all files (/a/b/c/...).
physical_home_folder: A physical location on the file system (/x/y).
Returns:
A new instance of the object.
"""
self._logical_home_folder = AbstractFileSystem.normpath(
logical_home_folder)
self._physical_home_folder = AbstractFileSystem.normpath(
physical_home_folder)
def _logical_to_physical(self, filename):
filename = AbstractFileSystem.normpath(filename)
if not (self._logical_home_folder and self._physical_home_folder):
return filename
filename = os.path.join(
self._physical_home_folder,
os.path.relpath(filename, self._logical_home_folder))
return AbstractFileSystem.normpath(filename)
def _physical_to_logical(self, filename):
filename = AbstractFileSystem.normpath(filename)
if not (self._logical_home_folder and self._physical_home_folder):
return filename
filename = os.path.join(
self._logical_home_folder,
os.path.relpath(filename, self._physical_home_folder))
return AbstractFileSystem.normpath(filename)
def isfile(self, filename):
return os.path.isfile(self._logical_to_physical(filename))
def get(self, filename):
if not self.isfile(filename):
return None
return open(self._logical_to_physical(filename), 'rb')
def put(self, unused_filename, unused_stream):
raise Exception('Not implemented.')
def delete(self, unused_filename):
raise Exception('Not implemented.')
# Need argument to be named exactly 'include_inherited' to match
# keyword-parameter names from derived/related classes.
# pylint: disable=unused-argument
def list(self, root_dir, include_inherited=False):
"""Lists all files in a directory."""
files = []
for dirname, unused_dirnames, filenames in os.walk(
self._logical_to_physical(root_dir)):
for filename in filenames:
files.append(
self._physical_to_logical(os.path.join(dirname, filename)))
return sorted(files)
def get_jinja_environ(self, dir_names, autoescape=True):
"""Configure the environment for Jinja templates."""
physical_dir_names = []
for dir_name in dir_names:
physical_dir_names.append(self._logical_to_physical(dir_name))
return jinja_utils.create_jinja_environment(
loader=jinja2.FileSystemLoader(physical_dir_names),
autoescape=autoescape)
def is_read_write(self):
return False
class FileMetadataEntity(BaseEntity):
"""An entity to represent a file metadata; absolute file name is a key."""
# TODO(psimakov): do we need 'version' to support concurrent updates
# TODO(psimakov): can we put 'data' here and still have fast isfile/list?
created_on = db.DateTimeProperty(auto_now_add=True, indexed=False)
updated_on = db.DateTimeProperty(indexed=True)
# A draft file is just like any other file. It's up to the consumer of the file
# to decide whether to treat draft differently (not to serve it to the
# public, for example). This class does not care and just stores the bit.
is_draft = db.BooleanProperty(indexed=False)
size = db.IntegerProperty(indexed=False)
class FileDataEntity(BaseEntity):
"""An entity to represent file content; absolute file name is a key."""
data = db.BlobProperty()
class FileStreamWrapped(object):
"""A class that wraps a file stream, but adds extra attributes to it."""
def __init__(self, metadata, data):
self._metadata = metadata
self._data = data
def read(self):
"""Emulates stream.read(). Returns all bytes and emulates EOF."""
data = self._data
self._data = ''
return data
@property
def metadata(self):
return self._metadata
class StringStream(object):
"""A wrapper to pose a string as a UTF-8 byte stream."""
def __init__(self, text):
self._data = unicode.encode(text, 'utf-8')
def read(self):
"""Emulates stream.read(). Returns all bytes and emulates EOF."""
data = self._data
self._data = ''
return data
def string_to_stream(text):
return StringStream(text)
def stream_to_string(stream):
return stream.read().decode('utf-8')
class VirtualFileSystemTemplateLoader(jinja2.BaseLoader):
"""Loader of jinja2 templates from a virtual file system."""
def __init__(self, fs, logical_home_folder, dir_names):
self._fs = fs
self._logical_home_folder = AbstractFileSystem.normpath(
logical_home_folder)
self._dir_names = []
if dir_names:
for dir_name in dir_names:
self._dir_names.append(AbstractFileSystem.normpath(dir_name))
def get_source(self, unused_environment, template):
for dir_name in self._dir_names:
filename = AbstractFileSystem.normpath(
os.path.join(dir_name, template))
stream = self._fs.open(filename)
if stream:
return stream.read().decode('utf-8'), filename, True
raise jinja2.TemplateNotFound(template)
def list_templates(self):
all_templates = []
for dir_name in self._dir_names:
all_templates += self._fs.list(dir_name)
return all_templates
class ProcessScopedVfsCache(caching.ProcessScopedSingleton):
"""This class holds in-process global cache of VFS objects."""
@classmethod
def get_vfs_cache_len(cls):
return len(ProcessScopedVfsCache.instance()._cache.items.keys())
@classmethod
def get_vfs_cache_size(cls):
return ProcessScopedVfsCache.instance()._cache.total_size
def __init__(self):
self._cache = caching.LRUCache(
max_size_bytes=MAX_GLOBAL_CACHE_SIZE_BYTES,
max_item_size_bytes=MAX_GLOBAL_CACHE_ITEM_SIZE_BYTES)
self._cache.get_entry_size = self._get_entry_size
def _get_entry_size(self, key, value):
return sys.getsizeof(key) + value.getsizeof() if value else 0
@property
def cache(self):
return self._cache
VFS_CACHE_LEN = PerfCounter(
'gcb-models-VfsCacheConnection-cache-len',
'A total number of items in vfs cache.')
VFS_CACHE_SIZE_BYTES = PerfCounter(
'gcb-models-VfsCacheConnection-cache-bytes',
'A total size of items in vfs cache in bytes.')
VFS_CACHE_LEN.poll_value = ProcessScopedVfsCache.get_vfs_cache_len
VFS_CACHE_SIZE_BYTES.poll_value = ProcessScopedVfsCache.get_vfs_cache_size
class CacheFileEntry(caching.AbstractCacheEntry):
"""Cache entry representing a file."""
def __init__(self, filename, metadata, body):
self.filename = filename
self.metadata = metadata
self.body = body
self.created_on = datetime.datetime.utcnow()
def getsizeof(self):
return (
sys.getsizeof(self.filename) +
sys.getsizeof(self.metadata) +
sys.getsizeof(self.body) +
sys.getsizeof(self.created_on))
def is_up_to_date(self, key, update):
metadata = update
if not self.metadata and not metadata:
return True
if self.metadata and metadata:
return (
metadata.updated_on == self.metadata.updated_on and
metadata.is_draft == self.metadata.is_draft)
return False
def updated_on(self):
return self.metadata.updated_on
@classmethod
def externalize(cls, key, entry):
return FileStreamWrapped(entry.metadata, entry.body)
@classmethod
def internalize(cls, key, metadata, data):
if metadata and data:
return CacheFileEntry(key, metadata, data)
return None
class VfsCacheConnection(caching.AbstractCacheConnection):
PERSISTENT_ENTITY = FileMetadataEntity
CACHE_ENTRY = CacheFileEntry
@classmethod
def init_counters(cls):
super(VfsCacheConnection, cls).init_counters()
cls.CACHE_NO_METADATA = PerfCounter(
'gcb-models-VfsCacheConnection-cache-no-metadata',
'A number of times an object was requested, but was not found and '
'had no metadata.')
cls.CACHE_INHERITED = PerfCounter(
'gcb-models-VfsCacheConnection-cache-inherited',
'A number of times an object was obtained from the inherited vfs.')
@classmethod
def is_enabled(cls):
return CAN_USE_VFS_IN_PROCESS_CACHE.value
def __init__(self, namespace):
super(VfsCacheConnection, self).__init__(namespace)
self.cache = ProcessScopedVfsCache.instance().cache
VfsCacheConnection.init_counters()
class DatastoreBackedFileSystem(object):
"""A read-write file system backed by a datastore."""
@classmethod
def make_key(cls, filename):
return 'vfs:dsbfs:%s' % filename
def __init__(
self, ns, logical_home_folder,
inherits_from=None, inheritable_folders=None):
"""Creates a new instance of the datastore-backed file system.
Args:
ns: A datastore namespace to use for storing all data and metadata.
logical_home_folder: A logical home dir of all files (/a/b/c/...).
inherits_from: A file system to use for the inheritance.
inheritable_folders: A list of folders that support inheritance.
Returns:
A new instance of the object.
Raises:
Exception: if invalid inherits_from is given.
"""
if inherits_from and not isinstance(
inherits_from, LocalReadOnlyFileSystem):
raise Exception('Can only inherit from LocalReadOnlyFileSystem.')
self._ns = ns
self._logical_home_folder = AbstractFileSystem.normpath(
logical_home_folder)
self._inherits_from = inherits_from
self._inheritable_folders = []
self._cache = threading.local()
if inheritable_folders:
for folder in inheritable_folders:
self._inheritable_folders.append(AbstractFileSystem.normpath(
folder))
def __getstate__(self):
"""Remove transient members that can't survive pickling."""
# TODO(psimakov): we need to properly pickle app_context so vfs is not
# being serialized at all
state = self.__dict__.copy()
if '_cache' in state:
del state['_cache']
return state
def __setstate__(self, state_dict):
"""Set persistent members and re-initialize transient members."""
self.__dict__ = state_dict
self._cache = threading.local()
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
# Don't intercept access to private methods and attributes.
if name.startswith('_'):
return attr
# Do intercept all methods.
if hasattr(attr, '__call__'):
def newfunc(*args, **kwargs):
"""Set proper namespace for each method call."""
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(self._ns)
if not hasattr(self._cache, 'connection'):
self._cache.connection = (
VfsCacheConnection.new_connection(self.ns))
return attr(*args, **kwargs)
finally:
namespace_manager.set_namespace(old_namespace)
return newfunc
# Don't intercept access to non-method attributes.
return attr
@property
def ns(self):
return self._ns
@property
def cache(self):
return self._cache.connection
def _logical_to_physical(self, filename):
filename = AbstractFileSystem.normpath(filename)
# For now we only support '/' as a physical folder name.
if self._logical_home_folder == '/':
return filename
if not filename.startswith(self._logical_home_folder):
raise Exception(
'Expected path \'%s\' to start with a prefix \'%s\'.' % (
filename, self._logical_home_folder))
rel_path = filename[len(self._logical_home_folder):]
if not rel_path.startswith('/'):
rel_path = '/%s' % rel_path
return rel_path
def physical_to_logical(self, filename):
"""Converts an internal filename to and external filename."""
# This class receives and stores absolute file names. The logical
# filename is the external file name. The physical filename is an
# internal filename. This function does the conversion.
# Let's say you want to store a file named '/assets/img/foo.png'.
# This would be a physical filename in the VFS. But the put() operation
# expects an absolute filename from the root of the app installation,
# i.e. something like '/dev/apps/coursebuilder/assets/img/foo.png',
# which is called a logical filename. This is a legacy expectation from
# the days the course was defined as files on the file system.
#
# This function will do the conversion you need.
return self._physical_to_logical(filename)
def _physical_to_logical(self, filename):
filename = AbstractFileSystem.normpath(filename)
# For now we only support '/' as a physical folder name.
if filename and not filename.startswith('/'):
filename = '/' + filename
if self._logical_home_folder == '/':
return filename
return '%s%s' % (self._logical_home_folder, filename)
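# Added illustration (not part of the original module): a concrete instance of the
# conversion described above, assuming _logical_home_folder == '/dev/apps/coursebuilder':
#   physical '/assets/img/foo.png'
#   -> logical '/dev/apps/coursebuilder/assets/img/foo.png'
# _logical_to_physical() performs the inverse mapping.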
def _can_inherit(self, filename):
"""Checks if a file can be inherited from a parent file system."""
for prefix in self._inheritable_folders:
if filename.startswith(prefix):
return True
return False
def get(self, afilename):
return self.open(afilename)
def open(self, afilename):
"""Gets a file from a datastore. Raw bytes stream, no encodings."""
filename = self._logical_to_physical(afilename)
found, stream = self.cache.get(filename)
if found and stream:
return stream
if not found:
metadata = FileMetadataEntity.get_by_key_name(filename)
if metadata:
data = FileDataEntity.get_by_key_name(filename)
if data:
self.cache.put(filename, metadata, data.data)
return FileStreamWrapped(metadata, data.data)
# lets us cache the (None, None) so the next time we are asked for this key
# we fall right into the inherited section without trying to load
# the metadata/data from the datastore; if a new object with this
# key is added in the datastore, we will see it in the update list
VfsCacheConnection.CACHE_NO_METADATA.inc()
self.cache.put(filename, None, None)
result = None
if self._inherits_from and self._can_inherit(filename):
result = self._inherits_from.get(afilename)
if result:
VfsCacheConnection.CACHE_INHERITED.inc()
return FileStreamWrapped(None, result.read())
VfsCacheConnection.CACHE_NOT_FOUND.inc()
return None
@db.transactional(xg=True)
def put(self, filename, stream, is_draft=False, metadata_only=False):
"""Puts a file stream to a database. Raw bytes stream, no encodings."""
self.non_transactional_put(
filename, stream, is_draft=is_draft, metadata_only=metadata_only)
def non_transactional_put(
self, filename, stream, is_draft=False, metadata_only=False):
"""Non-transactional put; use only when transactions are impossible."""
filename = self._logical_to_physical(filename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if not metadata:
metadata = FileMetadataEntity(key_name=filename)
metadata.updated_on = datetime.datetime.utcnow()
metadata.is_draft = is_draft
if not metadata_only:
# We operate with raw bytes. The consumer must deal with encoding.
raw_bytes = stream.read()
metadata.size = len(raw_bytes)
data = FileDataEntity(key_name=filename)
data.data = raw_bytes
data.put()
metadata.put()
self.cache.delete(filename)
def put_multi_async(self, filedata_list):
"""Initiate an async put of the given files.
This method initiates an asynchronous put of a list of file data
(presented as pairs of the form (filename, data_source)). It is not
transactional, and does not block, and instead immediately returns a
callback function. When this function is called it will block until
the puts are confirmed to have completed. For maximum efficiency it's
advisable to defer calling the callback until all other request handling
has completed, but in any event, it MUST be called before the request
handler can exit successfully.
Args:
filedata_list: list. A list of tuples. The first entry of each
tuple is the file name, the second is a filelike object holding
the file data.
Returns:
callable. Returns a wait-and-finalize function. This function must
be called at some point before the request handler exits, in order
to confirm that the puts have succeeded.
"""
filename_list = []
data_list = []
metadata_list = []
for filename, stream in filedata_list:
filename = self._logical_to_physical(filename)
filename_list.append(filename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if not metadata:
metadata = FileMetadataEntity(key_name=filename)
metadata_list.append(metadata)
metadata.updated_on = datetime.datetime.utcnow()
# We operate with raw bytes. The consumer must deal with encoding.
raw_bytes = stream.read()
metadata.size = len(raw_bytes)
data = FileDataEntity(key_name=filename)
data_list.append(data)
data.data = raw_bytes
# we do call delete here; so this instance will not increment EVICT
# counter value, but the DELETE value; other instances will not
# record DELETE, but EVICT when they query for updates
self.cache.delete(filename)
data_future = db.put_async(data_list)
metadata_future = db.put_async(metadata_list)
def wait_and_finalize():
data_future.check_success()
metadata_future.check_success()
return wait_and_finalize
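# Added illustration (not part of the original module): a hedged usage sketch for
# put_multi_async() above; `fs` and the file names are illustrative.
#   finalize = fs.put_multi_async([('/assets/a.txt', string_to_stream(u'A')),
#                                  ('/assets/b.txt', string_to_stream(u'B'))])
#   ...  # do the rest of the request handling
#   finalize()  # MUST run before the request handler exits, per the docstring above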
@db.transactional(xg=True)
def delete(self, filename):
filename = self._logical_to_physical(filename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if metadata:
metadata.delete()
data = FileDataEntity(key_name=filename)
if data:
data.delete()
self.cache.delete(filename)
def isfile(self, afilename):
"""Checks file existence by looking up the datastore row."""
filename = self._logical_to_physical(afilename)
metadata = FileMetadataEntity.get_by_key_name(filename)
if metadata:
return True
result = False
if self._inherits_from and self._can_inherit(filename):
result = self._inherits_from.isfile(afilename)
return result
def list(self, dir_name, include_inherited=False):
"""Lists all files in a directory by using datastore query.
Args:
dir_name: string. Directory to list contents of.
include_inherited: boolean. If True, includes all inheritable files
from the parent filesystem.
Returns:
List of string. Lexicographically-sorted unique filenames
recursively found in dir_name.
"""
dir_name = self._logical_to_physical(dir_name)
result = set()
keys = FileMetadataEntity.all(keys_only=True)
for key in keys.fetch(1000):
filename = key.name()
if filename.startswith(dir_name):
result.add(self._physical_to_logical(filename))
if include_inherited and self._inherits_from:
for inheritable_folder in self._inheritable_folders:
logical_folder = self._physical_to_logical(inheritable_folder)
result.update(set(self._inherits_from.list(
logical_folder,
include_inherited)))
return sorted(list(result))
def get_jinja_environ(self, dir_names, autoescape=True):
return jinja_utils.create_jinja_environment(
loader=VirtualFileSystemTemplateLoader(
self, self._logical_home_folder, dir_names),
autoescape=autoescape)
def is_read_write(self):
return True
class VfsTests(unittest.TestCase):
def test_pickling(self):
# pylint: disable=g-import-not-at-top
import pickle
pickle.dumps(caching.NoopCacheConnection())
pickle.dumps(caching.AbstractCacheConnection(None))
pickle.dumps(caching.AbstractCacheEntry())
pickle.dumps(CacheFileEntry('foo.bar', 'file metadata', 'file data'))
pickle.dumps(DatastoreBackedFileSystem('/', 'ns_test'))
with self.assertRaises(TypeError):
pickle.dumps(VfsCacheConnection('ns_test'))
def _setup_cache_with_one_entry(self, is_draft=True, updated_on=None):
ProcessScopedVfsCache.clear_all()
conn = VfsCacheConnection('ns_test')
meta = FileMetadataEntity()
meta.is_draft = is_draft
meta.updated_on = updated_on
conn.put('sample.txt', meta, 'file data')
found, stream = conn.get('sample.txt')
self.assertTrue(found)
self.assertEquals(stream.metadata.is_draft, meta.is_draft)
return conn
def test_expire(self):
conn = self._setup_cache_with_one_entry()
entry = conn.cache.items.get(conn.make_key('ns_test', 'sample.txt'))
self.assertTrue(entry)
entry.created_on = datetime.datetime.utcnow() - datetime.timedelta(
0, CacheFileEntry.CACHE_ENTRY_TTL_SEC + 1)
old_expire_count = VfsCacheConnection.CACHE_EXPIRE.value
found, stream = conn.get('sample.txt')
self.assertFalse(found)
self.assertEquals(stream, None)
self.assertEquals(
VfsCacheConnection.CACHE_EXPIRE.value - old_expire_count, 1)
def test_updates_with_no_changes_dont_evict(self):
class _Key(object):
def name(self):
return 'sample.txt'
def _key():
return _Key()
for is_draft, updated_on in [
(True, None), (True, datetime.datetime.utcnow()),
(False, None), (False, datetime.datetime.utcnow())]:
conn = self._setup_cache_with_one_entry(
is_draft=is_draft, updated_on=updated_on)
_, stream = conn.get('sample.txt')
meta = FileMetadataEntity()
meta.key = _key
meta.is_draft = stream.metadata.is_draft
meta.updated_on = stream.metadata.updated_on
updates = {'sample.txt': meta}
old_expire_count = VfsCacheConnection.CACHE_EVICT.value
conn.apply_updates(updates)
found, _ = conn.get('sample.txt')
self.assertTrue(found)
self.assertEquals(
VfsCacheConnection.CACHE_EVICT.value - old_expire_count, 0)
def test_empty_updates_dont_evict(self):
conn = self._setup_cache_with_one_entry()
updates = {}
old_expire_count = VfsCacheConnection.CACHE_EVICT.value
conn.apply_updates(updates)
found, _ = conn.get('sample.txt')
self.assertTrue(found)
self.assertEquals(
VfsCacheConnection.CACHE_EVICT.value - old_expire_count, 0)
def test_updates_with_changes_do_evict(self):
class _Key(object):
def name(self):
return 'sample.txt'
def _key():
return _Key()
def set_is_draft(meta, value):
meta.is_draft = value
def set_updated_on(meta, value):
meta.updated_on = value
conn = self._setup_cache_with_one_entry()
mutations = [
(lambda meta: set_is_draft(meta, False)),
(lambda meta: set_updated_on(meta, datetime.datetime.utcnow()))]
for mutation in mutations:
meta = FileMetadataEntity()
meta.key = _key
mutation(meta)
updates = {'sample.txt': meta}
conn.apply_updates(updates)
found, _ = conn.get('sample.txt')
self.assertFalse(found)
def test_apply_updates_expires_entries(self):
conn = self._setup_cache_with_one_entry()
entry = conn.cache.items.get(conn.make_key('ns_test', 'sample.txt'))
self.assertTrue(entry)
entry.created_on = datetime.datetime.utcnow() - datetime.timedelta(
0, CacheFileEntry.CACHE_ENTRY_TTL_SEC + 1)
updates = {}
conn.apply_updates(updates)
old_expire_count = VfsCacheConnection.CACHE_EXPIRE.value
found, stream = conn.get('sample.txt')
self.assertFalse(found)
self.assertEquals(stream, None)
self.assertEquals(
VfsCacheConnection.CACHE_EXPIRE.value - old_expire_count, 1)
def test_no_metadata_and_no_data_is_evicted(self):
ProcessScopedVfsCache.clear_all()
conn = VfsCacheConnection('ns_test')
conn.put('sample.txt', None, None)
meta = FileMetadataEntity()
meta.key = 'sample/txt'
updates = {'sample.txt': meta}
conn.apply_updates(updates)
found, stream = conn.get('sample.txt')
self.assertFalse(found)
self.assertEquals(stream, None)
def test_metadata_but_no_data_is_evicted(self):
ProcessScopedVfsCache.clear_all()
conn = VfsCacheConnection('ns_test')
meta = FileMetadataEntity()
meta.is_draft = True
meta.updated_on = datetime.datetime.utcnow()
conn.put('sample.txt', meta, None)
meta = FileMetadataEntity()
meta.key = 'sample/txt'
updates = {'sample.txt': meta}
conn.apply_updates(updates)
found, stream = conn.get('sample.txt')
self.assertFalse(found)
self.assertEquals(stream, None)
def run_all_unit_tests():
"""Runs all unit tests in this module."""
suites_list = []
for test_class in [VfsTests]:
suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
suites_list.append(suite)
result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))
if not result.wasSuccessful() or result.errors:
raise Exception(result)
if __name__ == '__main__':
run_all_unit_tests()
| 35.711902
| 80
| 0.649961
|
6c21201f6f54af348b14049cb98c51a22e812237
| 807
|
py
|
Python
|
dns_spoofer.py
|
menuscreen/PythonEthicalHacking
|
ebd132b9d6d1f75a1edf2365efbe12c161d0f152
|
[
"Apache-2.0"
] | null | null | null |
dns_spoofer.py
|
menuscreen/PythonEthicalHacking
|
ebd132b9d6d1f75a1edf2365efbe12c161d0f152
|
[
"Apache-2.0"
] | null | null | null |
dns_spoofer.py
|
menuscreen/PythonEthicalHacking
|
ebd132b9d6d1f75a1edf2365efbe12c161d0f152
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# dns_spoofer.py
#
#
import netfilterqueue
import subprocess
import scapy.all as scapy
def process_packet(packet):
scapy_packet = scapy.IP(packet.get_payload())
print(scapy_packet.show())
packet.accept()
# packet.drop()
# for testing on local machine
subprocess.run("iptables -I OUTPUT -j NFQUEUE --queue-num 0", shell=True)
subprocess.run("iptables -I INPUT -j NFQUEUE --queue-num 0", shell=True)
print("[+] Creating Netfilter Queue in iptables.")
print("[+] Running...")
try:
while True:
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
except KeyboardInterrupt:
print("\n[-] Ctrl+C -> Interrupt Detected.")
subprocess.run("iptables --flush", shell=True)
print("[-] Flushing iptables.")
| 24.454545
| 73
| 0.684015
|
7dc771acb3e632ff7319eaef53c2a725f4ad9dff
| 514
|
py
|
Python
|
tranasaction.py
|
mmalarz/python-blockchain
|
f1c378cfe8027f8f92f701dfb33f5a869a2b2768
|
[
"MIT"
] | null | null | null |
tranasaction.py
|
mmalarz/python-blockchain
|
f1c378cfe8027f8f92f701dfb33f5a869a2b2768
|
[
"MIT"
] | null | null | null |
tranasaction.py
|
mmalarz/python-blockchain
|
f1c378cfe8027f8f92f701dfb33f5a869a2b2768
|
[
"MIT"
] | null | null | null |
import hashlib
class Transaction:
def __init__(self, from_address, to_address, amount, timestamp):
self.from_address = from_address
self.to_address = to_address
self.amount = amount
self.timestamp = timestamp
def __repr__(self):
return (
f'Transaction('
f'from_address={self.from_address}, '
f'to_address={self.to_address}, '
f'amount={self.amount}, '
f'timestamp={self.timestamp})'
)
| 25.7
| 68
| 0.597276
|
bc4c07ce62d1fe872901cd2878f627841ffaa976
| 2,158
|
py
|
Python
|
pglifecycle/validation.py
|
gmr/pglifecycle
|
d8d3641ba8044f191e0b49686f0d028dd889a089
|
[
"BSD-3-Clause"
] | 3
|
2019-11-13T20:24:25.000Z
|
2021-12-06T13:21:10.000Z
|
pglifecycle/validation.py
|
gmr/pglifecycle
|
d8d3641ba8044f191e0b49686f0d028dd889a089
|
[
"BSD-3-Clause"
] | null | null | null |
pglifecycle/validation.py
|
gmr/pglifecycle
|
d8d3641ba8044f191e0b49686f0d028dd889a089
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Data validation using bundled JSON-Schema files
"""
import functools
import logging
import pathlib
import jsonschema
from jsonschema import exceptions
import pkg_resources
from pglifecycle import yaml
LOGGER = logging.getLogger(__name__)
def validate_object(obj_type: str, name: str, data: dict) -> bool:
"""Validate a data object using JSON-Schema"""
schema = _load_schemata(obj_type.lower())
# import json
# with open('{}.json'.format(obj_type), 'w') as handle:
# json.dump(schema, handle, indent=2)
try:
jsonschema.validate(data, schema)
except exceptions.ValidationError as error:
LOGGER.critical('Validation error for %s %s: %s for %r: %s',
obj_type, name, error.message,
error.path[0] if error.path
else error.absolute_schema_path[0],
error.instance)
return False
return True
@functools.lru_cache(maxsize=64)
def _load_schemata(obj_type: str) -> dict:
"""Load the schemata from the package, returning merged results of
other schema files if referenced in the file loaded.
:raises: FileNotFoundError
"""
schema_path = pathlib.Path(pkg_resources.resource_filename(
'pglifecycle', 'schemata/{}.yml'.format(obj_type).replace(' ', '_')))
if not schema_path.exists():
raise FileNotFoundError(
'Schema file not found for object type {!r}'.format(obj_type))
return _preprocess(yaml.load(schema_path))
def _preprocess(schema: dict) -> dict:
"""Merge in other schemas within the package if the `$package_schema` key
is found.
"""
schema_out = {}
for key, value in [(k, v) for k, v in schema.items()]:
if key == '$package_schema':
schema_out.update(_load_schemata(value))
elif isinstance(value, dict):
schema_out[key] = _preprocess(value)
elif isinstance(value, list):
schema_out[key] = [_preprocess(v) if isinstance(v, dict) else v
for v in value]
else:
schema_out[key] = value
return schema_out
| 30.394366
| 77
| 0.633457
|
a44427092bae88aa41b3b1d0684cfcf36835b3d2
| 1,636
|
py
|
Python
|
lite/demo/python/mobilenetv1_light_api.py
|
jameswu2014/Paddle-Lite
|
827e349ac8eb769a873fe9b3aa961af8b8b20a96
|
[
"Apache-2.0"
] | 1
|
2020-03-09T03:51:31.000Z
|
2020-03-09T03:51:31.000Z
|
lite/demo/python/mobilenetv1_light_api.py
|
jameswu2014/Paddle-Lite
|
827e349ac8eb769a873fe9b3aa961af8b8b20a96
|
[
"Apache-2.0"
] | null | null | null |
lite/demo/python/mobilenetv1_light_api.py
|
jameswu2014/Paddle-Lite
|
827e349ac8eb769a873fe9b3aa961af8b8b20a96
|
[
"Apache-2.0"
] | 1
|
2020-02-13T10:45:37.000Z
|
2020-02-13T10:45:37.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite light python api demo
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
sys.path.append('../../python/lib')
from lite_core import *
# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
def RunModel(args):
# 1. Set config information
config = MobileConfig()
config.set_model_dir(args.model_dir)
# 2. Create paddle predictor
predictor = create_paddle_predictor(config)
# 3. Set input data
input_tensor = predictor.get_input(0)
input_tensor.resize([1, 3, 224, 224])
input_tensor.set_float_data([1.] * 3 * 224 * 224)
# 4. Run model
predictor.run()
# 5. Get output data
output_tensor = predictor.get_output(0)
print(output_tensor.shape())
print(output_tensor.float_data()[:10])
if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
| 28.701754
| 76
| 0.726161
|
3fb7697c2128b89068e6d696b7f2d6bf9be12fb0
| 5,164
|
py
|
Python
|
embedding/filter_columns.py
|
guenthermi/table-embeddings
|
3ce094483fc5057b18f898d450a7c376d49818fa
|
[
"MIT"
] | 6
|
2021-03-17T09:53:10.000Z
|
2022-03-28T18:26:22.000Z
|
embedding/filter_columns.py
|
guenthermi/table-embeddings
|
3ce094483fc5057b18f898d450a7c376d49818fa
|
[
"MIT"
] | null | null | null |
embedding/filter_columns.py
|
guenthermi/table-embeddings
|
3ce094483fc5057b18f898d450a7c376d49818fa
|
[
"MIT"
] | null | null | null |
import re
import gzip
import ujson as json
from datetime import datetime
from whatthelang import WhatTheLang
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
import utils
class ColumnFilter:
def __init__(self, config):
self.config = config
self.result_tables = list()
self.lang_filter = self.config['lang_filter']
# minimal number of chars in a text value
self.min_text_value_size = self.config['min_text_value_size']
# maximal number of chars in a text value
self.max_text_value_size = self.config['max_text_value_size'] if type(
self.config['max_text_value_size']) == int else float('inf')
self.min_col_size = self.config['min_col_size']
self.re_filter = utils.RE_VALID_STRING
self.wtl = WhatTheLang()
return
def filter_columns(self, table):
if table['headerPosition'] == 'FIRST_COLUMN':
table['relation'] = list(zip(*table['relation']))
columns = list()
for col in table['relation']:
col_size = 0
new_column = []
for elem in col:
if (len(elem) < self.min_text_value_size) or (self.re_filter.fullmatch(elem) == None) or (len(elem) > self.max_text_value_size):
new_column.append(None)
else:
new_column.append(self._regularize_numbers(self._regularize_special_signs(elem)))
col_size += 1
if col_size >= self.min_col_size:
columns.append(new_column)
if len(columns) > 0:
if self.lang_filter != 'none':
text = ' '.join(
[' '.join([c for c in col if c != None]) for col in columns])
lang = self.wtl.predict_lang(text)
if lang != self.lang_filter:
return None
return {
'relation': columns,
'url': table['url'],
'title': table['title']
}
return None
def apply_filter(self):
BATCH_SIZE = 100000
size = self.config['max_size'] if type(
self.config['max_size']) == int else float('inf')
file_paths = self.config['dump_paths']
count = 0
self.result_tables = list()
self.init_output_file()
for file_path in file_paths:
f = gzip.open(file_path, 'rb')
meta_data = f.readline().decode('utf-8')
line = f.readline().decode('utf-8')
while line:
count += 1
if count % 10000 == 0:
print('Processed', count, 'tables')
if count > size:
break
data = json.loads(line)
table = self.filter_columns(data)
if table != None:
self.result_tables.append(table)
line = f.readline().decode('utf-8')
if len(self.result_tables) > BATCH_SIZE:
self.output_batch()
f.close()
self.output_batch()
self.close_output_file()
return
def init_output_file(self):
self.f_out = gzip.open(self.config['output_file'], 'w')
meta_data = self.config
meta_data['time_stamp'] = datetime.now().ctime()
self.f_out.write((json.dumps(meta_data) + '\n').encode('utf-8'))
def output_batch(self):
for table in self.result_tables:
self.f_out.write((json.dumps(table)+'\n').encode('utf-8'))
self.result_tables = []
def close_output_file(self):
self.f_out.close()
def output_tables(self):
# DEPRECATED
f = gzip.open(self.config['output_file'], 'w')
meta_data = self.config
meta_data['time_stamp'] = datetime.now().ctime()
f.write((json.dumps(meta_data) + '\n').encode('utf-8'))
for table in self.result_tables:
f.write((json.dumps(table)+'\n').encode('utf-8'))
f.close()
def _regularize_numbers(self, value):
return utils.RE_REGULARIZE_NUMBERS.sub('@',value)
def _regularize_special_signs(self, value):
return utils.RE_REGULARIZE_SPECIAL_SIGNS.sub('*', value)
def create_arg_parser():
parser = ArgumentParser("filter_columns",
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve',
description='Takes a dump of web tables and filters the columns of each table by filter criteria')
parser.add_argument('-c', '--config',
help="file with configuration of input, output destination, and filter criteria", required=True, nargs=1)
return parser
def main():
parser = create_arg_parser()
args = parser.parse_args()
# Parse config file
f_config = open(args.config[0], 'r')
config = json.load(f_config)
print('Create column filter ...')
filter = ColumnFilter(config)
print('Apply column / row filter ...')
filter.apply_filter()
print('Done')
if __name__ == "__main__":
main()
| 35.129252
| 144
| 0.572037
|
755408848d7c9e768267f259f382c47fab1514df
| 863
|
py
|
Python
|
setup.py
|
bratao/tus-flask
|
3a4c8ed79ec8d5f063ab1fabb1a22e395592b26f
|
[
"MIT"
] | 1
|
2019-04-10T02:38:00.000Z
|
2019-04-10T02:38:00.000Z
|
setup.py
|
bratao/tus-flask
|
3a4c8ed79ec8d5f063ab1fabb1a22e395592b26f
|
[
"MIT"
] | 1
|
2018-10-16T13:28:16.000Z
|
2018-10-16T13:28:16.000Z
|
setup.py
|
bratao/tus-flask
|
3a4c8ed79ec8d5f063ab1fabb1a22e395592b26f
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
setup(
name='tus-flask',
version='0.1.0',
description='A flask filter for the TUS resumable upload protocol',
long_description=long_description,
url='bratao/tus-flask',
author='bratao',
author_email='bruno@potelo.com.br',
keywords='tus flask filter',
license='MIT',
py_modules=['tusfilter'],
install_requires=['WebOb'],
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'License :: OSI Approved :: MIT License',
],
)
| 26.151515
| 71
| 0.621089
|
1f13827d26a9d1d8a5ea18927e42bbb93e1bb68c
| 4,010
|
py
|
Python
|
src/glum_benchmarks/benchmark_dense_sandwich.py
|
readthedocs-assistant/glum
|
d6c114df9b860cf73b201d846b7965cb15fc7720
|
[
"BSD-3-Clause"
] | 68
|
2021-10-08T09:05:29.000Z
|
2022-03-28T14:40:58.000Z
|
src/glum_benchmarks/benchmark_dense_sandwich.py
|
readthedocs-assistant/glum
|
d6c114df9b860cf73b201d846b7965cb15fc7720
|
[
"BSD-3-Clause"
] | 49
|
2021-10-08T01:44:18.000Z
|
2022-03-08T08:53:00.000Z
|
src/glum_benchmarks/benchmark_dense_sandwich.py
|
readthedocs-assistant/glum
|
d6c114df9b860cf73b201d846b7965cb15fc7720
|
[
"BSD-3-Clause"
] | 11
|
2021-10-14T10:34:53.000Z
|
2022-03-09T11:38:29.000Z
|
import time
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
import pandas as pd
from tabmat.ext.dense import dense_sandwich
def _numpy_mklC(X, d):
sqrtD = np.sqrt(d)[:, np.newaxis]
x_d = X[0] * sqrtD
return x_d.T @ x_d
def _numpy_mklF(X, d):
sqrtD = np.sqrt(d)[:, np.newaxis]
x_d = X[1] * sqrtD
return x_d.T @ x_d
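# Added sketch (not part of the original module): the identity the two helpers above
# rely on is (X * sqrt(d)).T @ (X * sqrt(d)) == X.T @ diag(d) @ X, the "sandwich" product.
def _check_sandwich_identity():
    rng = np.random.default_rng(0)
    X = rng.random((5, 3))
    d = rng.random(5)
    lhs = (X * np.sqrt(d)[:, np.newaxis]).T @ (X * np.sqrt(d)[:, np.newaxis])
    rhs = X.T @ np.diag(d) @ X
    np.testing.assert_allclose(lhs, rhs)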
def _bench(f: Callable, iter: int) -> Tuple[List[float], Any]:
ts = []
for _ in range(iter):
start = time.time()
out: Any = f()
ts.append(time.time() - start)
return ts, out
def _dense_sandwichC(X, d):
return dense_sandwich(X[0], d)
def _dense_sandwichF(X, d):
return dense_sandwich(X[1], d)
def _mn_run(m, n, iter, dtype):
precision = dtype().itemsize * 8
X = [np.random.rand(n, m).astype(dtype=dtype)]
d = np.random.rand(n).astype(dtype=dtype)
X.append(np.asfortranarray(X[0]))
out: Dict[str, Any] = {"name": [], "runtime": []}
to_run = [
"numpy_mklC",
# "numpy_mklF",
"_dense_sandwichC",
"_dense_sandwichF",
]
for name in to_run:
ts, result = _bench(lambda: globals()[name](X, d), iter)
if name == "numpy_mklC":
true = result
elif "numpy_mklC" in to_run:
err = np.abs((true - result) / true)
np.testing.assert_almost_equal(err, 0, 4 if precision == 32 else 7)
runtime = np.min(ts)
out["name"].append(name)
out["runtime"].append(runtime)
print(name, runtime)
out_df = pd.DataFrame(out)
out_df["m"] = m
out_df["n"] = n
out_df["precision"] = precision
return out_df
def main():
"""Run some kind of benchmark."""
iter = 20
Rs = []
for m, n in [
(20, 1000000),
# (50, 500000),
# (150, 200000),
# (300, 100000),
# (2048, 2048),
# (1500, 1500),
(500, 500),
]:
for dt in [np.float64]:
Rs.append(_mn_run(m, n, iter, dt))
df = pd.concat(Rs)
df.set_index(["m", "n", "name", "precision"], inplace=True)
df.sort_index(inplace=True)
print(df)
def main2():
"""Run some kind of benchmark."""
n = 500
m = 500
dtype = np.float64
X = np.asfortranarray(np.random.rand(n, m).astype(dtype=dtype))
d = np.random.rand(n).astype(dtype=dtype)
t1d = []
pls = []
krs = []
ibs = []
results = []
# for thresh1d in [16, 32, 64, 128]:
# for parlevel in [5, 7, 10, 13]:
# for kratio in [1, 10, 20, 80]:
for thresh1d in [32, 64]:
for parlevel in [9]:
for kratio in [8, 16]:
for innerblock in [32, 64, 128, 256]:
t1d.append(thresh1d)
pls.append(parlevel)
krs.append(kratio)
ibs.append(innerblock)
# results.append(np.min(bench(lambda: X.T @ X, 1)[0]))
results.append(
np.min(
_bench(
lambda: dense_sandwich(
X, d, thresh1d, parlevel, kratio, innerblock
),
50,
)[0]
)
)
print(results[-1])
df = pd.DataFrame(
dict(thresh1d=t1d, parlevel=pls, kratio=krs, innerblock=ibs, results=results)
)
df.set_index(["thresh1d", "parlevel", "kratio", "innerblock"], inplace=True)
df.sort_index(inplace=True)
with pd.option_context("display.max_rows", None, "display.max_columns", None):
print(df)
# 841650213 L1-dcache-load-misses # 12.01% of all L1-dcache hits (71.87%)
# 7006517280 L1-dcache-loads (71.53%)
# 1016757397 L1-dcache-stores (69.82%)
if __name__ == "__main__":
main()
| 28.239437
| 88
| 0.500249
|
daa690fc5242a02a9f8cf1f2ec582199255817de
| 11,223
|
py
|
Python
|
oss_net/decoder.py
|
ChristophReich1996/OSS-Net
|
38ffae60286b53e72f2d17f510dbbfffb7036caa
|
[
"MIT"
] | 17
|
2021-11-03T18:18:38.000Z
|
2022-03-20T12:35:56.000Z
|
oss_net/decoder.py
|
ChristophReich1996/OSS-Net
|
38ffae60286b53e72f2d17f510dbbfffb7036caa
|
[
"MIT"
] | null | null | null |
oss_net/decoder.py
|
ChristophReich1996/OSS-Net
|
38ffae60286b53e72f2d17f510dbbfffb7036caa
|
[
"MIT"
] | 1
|
2022-01-12T04:34:39.000Z
|
2022-01-12T04:34:39.000Z
|
from typing import Type, Tuple, Any
import math
import torch
import torch.nn as nn
from pade_activation_unit.utils import PAU
class ResidualCBNDecoder(nn.Module):
"""
This class implements a residual and Conditional Batch Normalization (CBN) based occupancy decoder.
"""
def __init__(self,
output_features: int = 1,
latent_features: int = 320,
features: Tuple[Tuple[int, int], ...] = (
(448, 256), (256, 256), (256, 256), (256, 256), (256, 256)),
activation: Type[nn.Module] = nn.ReLU,
dropout: float = 0.0,
fourier_input_features: bool = True,
patch_mapping: bool = True,
large_patches: bool = True,
patch_channels: int = 4,
**kwargs: Any) -> None:
"""
Constructor method
:param output_features: (int) Number of output features (binary classification = 1)
:param latent_features: (int) Number of features in the encoder latent vector
:param features: (Tuple[Tuple[int, int], ...]) Features (in and out) utilized in each block
:param activation: (Type[nn.Module]) Type of activation function to be utilized
:param dropout: (float) Dropout rate to be utilized
:param fourier_input_features: (bool) If true random fourier input features are utilized
:param patch_mapping: (bool) If true patch mapping and patches are utilized
:param large_patches: (bool) If true additional channels are used in patch mapping for large patches
:param kwargs: Key word arguments (not used)
"""
# Call super constructor
super(ResidualCBNDecoder, self).__init__()
# Save parameters
self.fourier_input_features = fourier_input_features
if self.fourier_input_features:
self.register_buffer("b", torch.randn(1, 3, 16) * 4.)
# Init residual blocks
self.blocks = nn.ModuleList(
[ResidualCBNFFNNBlock(in_features=feature[0], out_features=feature[1], latent_features=latent_features,
activation=activation, dropout=dropout) for feature in features])
# Init patch mapping
self.patch_mapping = PatchMapping(in_channels=patch_channels * 2 if large_patches else patch_channels,
out_channels=1, activation=activation,
dropout=dropout) if patch_mapping else None
# Init final layer and activation
self.final_mapping = nn.Sequential(
nn.Linear(in_features=features[-1][-1], out_features=output_features, bias=False),
nn.Softmax(dim=-1) if output_features > 1 else nn.Sigmoid()
)
def forward(self, coordinates: torch.Tensor, patches: torch.Tensor, latent_vectors: torch.Tensor) -> torch.Tensor:
"""
Forward pass
:param coordinates: (torch.Tensor) Input coordinates
:param patches: (torch.Tensor) Patches
:param latent_vectors: (torch.Tensor) Latent vectors
:return: (torch.Tensor) Output tensor
"""
# Perform fourier feature mapping if utilized
if self.fourier_input_features:
coordinates = torch.cat([torch.cos(2 * math.pi * coordinates @ self.b),
torch.sin(2 * math.pi * coordinates @ self.b),
coordinates], dim=-1)
# Construct input tensor
if self.patch_mapping is not None:
input = torch.cat([
coordinates,
self.patch_mapping(patches).flatten(start_dim=2),
latent_vectors[:, :1].repeat_interleave(repeats=coordinates.shape[0] // latent_vectors.shape[0],
dim=0)],
dim=-1)
else:
input = torch.cat([
coordinates,
latent_vectors[:, :1].repeat_interleave(repeats=coordinates.shape[0] // latent_vectors.shape[0],
dim=0)],
dim=-1)
# Forward pass residual CBN blocks
if latent_vectors.shape[1] == 1:
for index, block in enumerate(self.blocks):
input = block(input, latent_vectors)
else:
for index, block in enumerate(self.blocks):
input = block(input, latent_vectors[:, index + 1:index + 2])
# Forward pass final layer and activation
output = self.final_mapping(input).squeeze(dim=1)
return output
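# Added sketch (not part of the original module): the random Fourier feature mapping
# performed in forward() above. With a buffer b of shape (1, 3, 16), each 3D coordinate
# is lifted to 16 cosine features + 16 sine features + the raw coordinate (35 per point).
def _fourier_feature_demo():
    coords = torch.rand(8, 1, 3)        # (batch * points, 1, xyz)
    b = torch.randn(1, 3, 16) * 4.      # same shape as the registered buffer "b"
    feats = torch.cat([torch.cos(2 * math.pi * coords @ b),
                       torch.sin(2 * math.pi * coords @ b),
                       coords], dim=-1)
    return feats.shape                  # torch.Size([8, 1, 35])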
class ResidualCBNFFNNBlock(nn.Module):
"""
This class implements a simple residual feed-forward neural network block with two linear layers and CBN.
"""
def __init__(self, in_features: int, out_features: int, latent_features: int,
activation: Type[nn.Module] = PAU, dropout: float = 0.0) -> None:
"""
Constructor method
:param in_features: (int) Number of input features
:param out_features: (int) Number of output features
:param latent_features: (int) Number of latent features
:param activation: (Type[nn.Module]) Type of activation function to be utilized
:param dropout: (float) Dropout rate to be utilized
"""
# Call super constructor
super(ResidualCBNFFNNBlock, self).__init__()
# Init linear layers
self.linear_layer_1 = nn.Linear(in_features=in_features, out_features=out_features, bias=True)
self.linear_layer_2 = nn.Linear(in_features=out_features, out_features=out_features, bias=True)
# Init activations
self.activation = activation()
self.final_activation = activation()
# Init dropout layer
self.dropout = nn.Dropout(p=dropout, inplace=True)
# Init residual mapping
self.residual_mapping = nn.Linear(in_features=in_features, out_features=out_features,
bias=True) if in_features != out_features else nn.Identity()
# Init CNB
self.cnb_1 = ConditionalBatchNorm1d(latent_features=latent_features, input_features=out_features)
self.cnb_2 = ConditionalBatchNorm1d(latent_features=latent_features, input_features=out_features)
def forward(self, input: torch.Tensor, latent_vector: torch.Tensor) -> torch.Tensor:
"""
Forward pass
:param input: (torch.Tensor) Input tensor of the shape [batch size * coordinates, *, in features]
:param latent_vector: (torch.Tensor) Encoded latent vector of the shape [batch size, latent features]
:return: (torch.Tensor) Output tensor of the shape [batch size * coordinates, *, out features]
"""
# Forward pass first stage
output = self.linear_layer_1(input)
output = self.cnb_1(output, latent_vector)
output = self.activation(output)
output = self.dropout(output)
# Forward pass second stage
output = self.linear_layer_2(output)
output = self.cnb_2(output, latent_vector)
# Forward pass residual mapping and final activation
output = output + self.residual_mapping(input)
output = self.final_activation(output)
return output
class ConditionalBatchNorm1d(nn.Module):
"""
Implementation of a conditional batch normalization module using linear operation to predict gamma and beta
"""
def __init__(self, latent_features: int, input_features: int,
normalization: Type[nn.Module] = nn.BatchNorm1d) -> None:
"""
Conditional batch normalization module including two linear layers to predict gamma and beta
:param latent_features: (int) Features of the latent vector
:param input_features: (int) Features of the output vector to be normalized
:param normalization: (Type[nn.Module]) Type of normalization to be utilized
"""
super(ConditionalBatchNorm1d, self).__init__()
# Init operations
self.linear_gamma = nn.Linear(in_features=latent_features, out_features=input_features, bias=True)
self.linear_beta = nn.Linear(in_features=latent_features, out_features=input_features, bias=True)
self.normalization = normalization(num_features=1, affine=False, track_running_stats=True, momentum=0.1)
        # Reset parameters of the gamma and beta linear layers
self.reset_parameters()
def reset_parameters(self) -> None:
"""
        Method resets the parameters of the linear layers that predict gamma and beta
"""
nn.init.zeros_(self.linear_gamma.weight)
nn.init.zeros_(self.linear_beta.weight)
nn.init.ones_(self.linear_gamma.bias)
nn.init.zeros_(self.linear_beta.bias)
def forward(self, input: torch.Tensor, latent_vector: torch.Tensor) -> torch.Tensor:
"""
Forward pass
:param input: (torch.Tensor) Input tensor to be normalized of shape (batch size * coordinates, 1, features)
        :param latent_vector: (torch.Tensor) Latent vector tensor of shape (batch size, 1, latent features)
:return: (torch.Tensor) Normalized tensor
"""
# Perform linear layers to estimate gamma and beta
gamma = self.linear_gamma(latent_vector)
beta = self.linear_beta(latent_vector)
# Perform normalization
output_normalized = self.normalization(input)
# Repeat gamma and beta to apply factors to every coordinate
gamma = gamma.repeat_interleave(output_normalized.shape[0] // gamma.shape[0], dim=0)
beta = beta.repeat_interleave(output_normalized.shape[0] // beta.shape[0], dim=0)
# Add factors
output = gamma * output_normalized + beta
return output
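
# Added illustrative sketch (not part of the original model): a minimal,
# self-contained version of the conditional batch norm idea above, in which a
# latent vector predicts a per-sample scale (gamma) and shift (beta) applied to
# batch-normalized features. All sizes below are assumptions.
def _conditional_batch_norm_sketch() -> None:
    import torch  # repeated locally so the sketch is self-contained
    import torch.nn as nn
    batch_size, coordinates, latent_features, features = 2, 100, 32, 64
    latent = torch.randn(batch_size, latent_features)
    x = torch.randn(batch_size * coordinates, 1, features)
    to_gamma = nn.Linear(latent_features, features)
    to_beta = nn.Linear(latent_features, features)
    norm = nn.BatchNorm1d(num_features=1, affine=False)
    # Repeat the per-sample factors for every coordinate and add a singleton
    # channel dimension so the broadcasting is explicit
    gamma = to_gamma(latent).repeat_interleave(coordinates, dim=0).unsqueeze(1)
    beta = to_beta(latent).repeat_interleave(coordinates, dim=0).unsqueeze(1)
    out = gamma * norm(x) + beta
    assert out.shape == x.shape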
class PatchMapping(nn.Module):
"""
    This class implements a patch mapping module, embedding a volume of 3D patches with two 3D convolutions.
"""
def __init__(self, in_channels: int, out_channels: int, activation: Type[nn.Module] = nn.PReLU,
dropout: float = 0.) -> None:
"""
Constructor method
:param in_channels: (int) Number of input channels
:param out_channels: (int) Number of output channels
        :param activation: (Type[nn.Module]) Type of activation to be utilized
        :param dropout: (float) Dropout rate to be utilized
"""
# Call super constructor
super(PatchMapping, self).__init__()
# Init mapping
self.mapping = nn.Sequential(
nn.Conv3d(in_channels=in_channels, out_channels=max(in_channels // 2, 1), kernel_size=(7, 7, 7),
stride=(1, 1, 1), padding=(3, 3, 3), bias=True),
activation(),
nn.Dropout(p=dropout, inplace=True),
nn.Conv3d(in_channels=max(in_channels // 2, 1), out_channels=out_channels, kernel_size=(7, 7, 7),
stride=(1, 1, 1), padding=(3, 3, 3), bias=True),
activation()
)
def forward(self, patches: torch.Tensor) -> torch.Tensor:
"""
Forward pass
:param patches: (torch.Tensor) Input volume of 3d patches
:return: (torch.Tensor) Output feature tensor
"""
output = self.mapping(patches)
return output
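
if __name__ == "__main__":
    # Added usage sketch (sizes are assumptions; torch and nn are imported at the
    # top of this file): PatchMapping keeps the spatial resolution of the 3D
    # patch volume and only changes the channel dimension.
    patch_mapping = PatchMapping(in_channels=4, out_channels=8, activation=nn.PReLU)
    features = patch_mapping(torch.rand(2, 4, 16, 16, 16))
    assert features.shape == (2, 8, 16, 16, 16)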
| 47.757447
| 118
| 0.627372
|
7175be6a886ab36cc03e4d4b7cdeaefabc5dc93c
| 4,272
|
py
|
Python
|
otcextensions/sdk/auto_scaling/v1/group.py
|
kucerakk/python-otcextensions
|
d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6
|
[
"Apache-2.0"
] | null | null | null |
otcextensions/sdk/auto_scaling/v1/group.py
|
kucerakk/python-otcextensions
|
d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6
|
[
"Apache-2.0"
] | null | null | null |
otcextensions/sdk/auto_scaling/v1/group.py
|
kucerakk/python-otcextensions
|
d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from otcextensions.sdk.auto_scaling.v1 import _base
class Group(_base.Resource):
resource_key = 'scaling_group'
resources_key = 'scaling_groups'
base_path = '/scaling_group'
query_marker_key = 'start_number'
# service = auto_scaling_service.AutoScalingService()
# capabilities
allow_create = True
allow_list = True
allow_fetch = True
allow_delete = True
allow_update = True
_query_mapping = resource.QueryParameters(
'scaling_configuration_id', 'scaling_group_name', 'limit',
scaling_group_name='scaling_group_name',
# status='scaling_group_status',
marker=query_marker_key,
limit='limit'
)
#: Properties
#: AutoScaling group ID
id = resource.Body('scaling_group_id', alternate_id=True)
#: AutoScaling group name
name = resource.Body('scaling_group_name')
#: AutoScaling group status,
    #: valid values include: ``INSERVICE``, ``PAUSED``, ``ERROR``
status = resource.Body('scaling_group_status')
#: AutoScaling group scaling status, *Type: bool*
is_scaling = resource.Body('is_scaling', type=bool)
#: AutoScaling group detail
detail = resource.Body('detail')
    #: VPC ID (router ID)
    network_id = resource.Body('vpc_id')
    #: Network (subnet) ID list
    subnetworks = resource.Body('networks', type=list)
    #: Security group ID list
    security_groups = resource.Body('security_groups', type=list)
#: Auto Scaling Config ID reference, used for creating instance
scaling_configuration_id = resource.Body('scaling_configuration_id')
#: Auto Scaling Config name
scaling_configuration_name = resource.Body('scaling_configuration_name')
    #: Current number of alive instances
    current_instance_number = resource.Body('current_instance_number')
    #: Desired number of alive instances
    desire_instance_number = resource.Body('desire_instance_number')
    #: Minimum number of alive instances
    min_instance_number = resource.Body('min_instance_number')
    #: Maximum number of alive instances
    max_instance_number = resource.Body('max_instance_number')
    #: Cooldown time in seconds; only works with the ``ALARM`` policy.
    #: Default is 900, valid range is 0-86400
cool_down_time = resource.Body('cool_down_time')
#: load balancer listener id reference
lb_listener_id = resource.Body('lb_listener_id')
    #: Health periodic audit method; valid values include ``ELB_AUDIT`` and
    #: ``NOVA_AUDIT``. ``ELB_AUDIT`` must be used together with lb_listener_id.
health_periodic_audit_method = resource.Body(
'health_periodic_audit_method')
    #: Health periodic audit time, valid values include: ``5``, ``15``,
#: ``60``, ``180``, default is ``5`` minutes
health_periodic_audit_time = resource.Body('health_periodic_audit_time')
    #: Instance terminate policy, valid values include:
#: ``OLD_CONFIG_OLD_INSTANCE`` (default), ``OLD_CONFIG_NEW_INSTANCE``,
#: ``OLD_INSTANCE``, ``NEW_INSTANCE``
instance_terminate_policy = resource.Body('instance_terminate_policy')
    #: Notification methods, e.g. ``EMAIL``
notifications = resource.Body('notifications')
    #: Whether to delete the public IP when an instance is terminated,
    #: default ``false``
delete_publicip = resource.Body('delete_publicip', type=bool)
#: availability zones
availability_zones = resource.Body('available_zones')
#: Create time of the group
create_time = resource.Body('create_time')
def resume(self, session):
        '''Resume the Auto Scaling group'''
body = {'action': 'resume'}
self._action(session, body)
def pause(self, session):
        '''Pause the Auto Scaling group'''
body = {'action': 'pause'}
self._action(session, body)
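
if __name__ == '__main__':
    # Added local sketch (not an API call): assuming the standard
    # openstack.resource.Resource keyword constructor, a Group can be built and
    # inspected offline; all attribute values here are made up.
    group = Group(name='example-as-group', desire_instance_number=2,
                  min_instance_number=1, max_instance_number=4)
    print(group.name, group.min_instance_number, group.max_instance_number)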
| 41.475728
| 76
| 0.707397
|
0675dea855ba9d164604c77f69869ebcd9890e5b
| 1,066
|
py
|
Python
|
scripts/quantile_plot.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 2
|
2021-04-10T18:12:19.000Z
|
2021-05-11T12:07:40.000Z
|
scripts/quantile_plot.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-04-22T15:46:27.000Z
|
2021-04-22T15:46:27.000Z
|
scripts/quantile_plot.py
|
always-newbie161/pyprobml
|
eb70c84f9618d68235ef9ba7da147c009b2e4a80
|
[
"MIT"
] | 1
|
2021-06-21T01:18:07.000Z
|
2021-06-21T01:18:07.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from scipy.stats import norm
x = np.linspace(-3, 3, 100)
y = norm.pdf(x)
f = norm.cdf(x)
plt.figure()
plt.plot(x, f)
plt.title('CDF')
pml.save_fig('gaussianCDF.pdf')
plt.show()
plt.figure()
plt.plot(x, y)
pml.save_fig('gaussianPDF.pdf')
plt.show()
plt.figure()
plt.plot(x, y)
x_sep_left = norm.ppf(0.025)
x_sep_right = norm.ppf(0.975)
x_fill_left = np.linspace(-3, x_sep_left, 100)
x_fill_right = np.linspace(x_sep_right, 3, 100)
plt.fill_between(x_fill_left,
norm.pdf(x_fill_left),
color='b')
plt.fill_between(x_fill_right,
norm.pdf(x_fill_right),
color='b')
plt.annotate(r'$\alpha/2$', xy=(x_sep_left, norm.pdf(x_sep_left)),
xytext=(-2.5, 0.1),
arrowprops=dict(facecolor='k'))
plt.annotate(r'$1-\alpha/2$', xy=(x_sep_right, norm.pdf(x_sep_right)),
xytext=(2.5, 0.1),
arrowprops=dict(facecolor='k'))
plt.ylim([0, 0.5])
pml.save_fig('gaussianQuantile.pdf')
plt.show()
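
# Added numeric check (not part of the original figure code; uses the norm
# imported above): each shaded tail holds alpha/2 = 2.5% of the probability
# mass, so the central interval [ppf(0.025), ppf(0.975)] covers 95% of a
# standard normal.
alpha = 0.05
central_mass = norm.cdf(norm.ppf(1 - alpha / 2)) - norm.cdf(norm.ppf(alpha / 2))
print('central interval: [{:.3f}, {:.3f}], mass = {:.3f}'.format(
    norm.ppf(alpha / 2), norm.ppf(1 - alpha / 2), central_mass))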
| 24.227273
| 70
| 0.640713
|
7a9af24eb92488c2b38938c1e5e87418e94a4de3
| 1,161
|
py
|
Python
|
neutron/db/model_base.py
|
igor-toga/local-snat
|
7adfe5668d309ff56350acee0d0b986e670abe7c
|
[
"Apache-2.0"
] | 1
|
2017-09-10T09:57:35.000Z
|
2017-09-10T09:57:35.000Z
|
neutron/db/model_base.py
|
igor-toga/local-snat
|
7adfe5668d309ff56350acee0d0b986e670abe7c
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/model_base.py
|
igor-toga/local-snat
|
7adfe5668d309ff56350acee0d0b986e670abe7c
|
[
"Apache-2.0"
] | 1
|
2015-05-05T14:41:11.000Z
|
2015-05-05T14:41:11.000Z
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.db import model_base as lib_mb
import sqlalchemy as sa
from neutron.common import _deprecate
_deprecate._moved_global('HasTenant', new_module=lib_mb, new_name='HasProject')
def get_unique_keys(model):
try:
constraints = model.__table__.constraints
except AttributeError:
constraints = []
return [[c.name for c in constraint.columns]
for constraint in constraints
if isinstance(constraint, sa.UniqueConstraint)]
# This shim is used to deprecate the old contents.
_deprecate._MovedGlobals(lib_mb)
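
if __name__ == '__main__':
    # Added illustrative sketch (hypothetical model, not a Neutron table):
    # get_unique_keys returns one list of column names per UniqueConstraint
    # declared on the model's table.
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class _Example(Base):
        __tablename__ = 'example'
        id = sa.Column(sa.Integer, primary_key=True)
        a = sa.Column(sa.String(16))
        b = sa.Column(sa.String(16))
        __table_args__ = (sa.UniqueConstraint('a', 'b', name='uniq_example_a_b'),)

    print(get_unique_keys(_Example))  # e.g. [['a', 'b']]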
| 32.25
| 79
| 0.749354
|
df62a0f4beec62059401f7a7108f8dc4fa8faee0
| 146
|
py
|
Python
|
tests/conftest.py
|
kennyworkman/colony-visual
|
d4eacb1e90d449e93a34a1ca2e7a1dce067d049e
|
[
"MIT",
"Unlicense"
] | 2
|
2019-09-04T07:36:56.000Z
|
2020-05-23T15:20:37.000Z
|
tests/conftest.py
|
kennyworkman/colony-visual
|
d4eacb1e90d449e93a34a1ca2e7a1dce067d049e
|
[
"MIT",
"Unlicense"
] | null | null | null |
tests/conftest.py
|
kennyworkman/colony-visual
|
d4eacb1e90d449e93a34a1ca2e7a1dce067d049e
|
[
"MIT",
"Unlicense"
] | null | null | null |
import pytest
from cvisual.utils import get_bam_vcf_files
@pytest.fixture
def dummy_data():
return get_bam_vcf_files('./tests/dummy_data/')
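
# Added hypothetical usage (for illustration only): a test consuming the fixture
# above would normally live in a separate test module such as tests/test_utils.py
# (name assumed). The assertion is deliberately weak because the return type of
# get_bam_vcf_files is not shown here.
def test_dummy_data_is_loaded(dummy_data):
    assert dummy_data is not None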
| 18.25
| 51
| 0.787671
|