Dataset schema:

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable (⌀) |
| content | string | lengths 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |
hexsha: 2916409383c8771ce3ecd7cf53468b379e9b3680 | size: 47 | ext: py | lang: Python
repo path: snakemake/version.py | repo name: baffelli/snakemake | head hexsha: 4a4f449019d7aac5a7c6a50c98ea0c8b3e194120 | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
__version__ = "3.9.1"
MIN_PY_VERSION = (3, 3)
avg_line_length: 11.75 | max_line_length: 23 | alphanum_fraction: 0.638298
hexsha: e4ad9cd9e642eea36217e7211032d5ea9cbebe3f | size: 6,180 | ext: py | lang: Python
repo path: setup.py | repo name: poplarShift/panel | head hexsha: 1df70b5105639869997707d7efd2897082be390e | licenses: ["BSD-3-Clause"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
#!/usr/bin/env python
import os
import shutil
import sys
import json
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.sdist import sdist
import pyct.build
def get_setup_version(reponame):
"""
Helper to get the current version from either git describe or the
.version file (if available).
"""
basepath = os.path.split(__file__)[0]
version_file_path = os.path.join(basepath, reponame, '.version')
try:
from param import version
except Exception:
version = None
if version is not None:
return version.Version.setup_version(basepath, reponame, archive_commit="$Format:%h$")
else:
print("WARNING: param>=1.6.0 unavailable. If you are installing a package, "
"this warning can safely be ignored. If you are creating a package or "
"otherwise operating in a git repository, you should install param>=1.6.0.")
return json.load(open(version_file_path, 'r'))['version_string']
def _build_paneljs():
from bokeh.ext import build
print("Building custom models:")
panel_dir = os.path.join(os.path.dirname(__file__), "panel")
build(panel_dir)
class CustomDevelopCommand(develop):
"""Custom installation for development mode."""
def run(self):
_build_paneljs()
develop.run(self)
class CustomInstallCommand(install):
"""Custom installation for install mode."""
def run(self):
_build_paneljs()
install.run(self)
class CustomSdistCommand(sdist):
"""Custom installation for sdist mode."""
def run(self):
_build_paneljs()
sdist.run(self)
_COMMANDS = {
'develop': CustomDevelopCommand,
'install': CustomInstallCommand,
'sdist': CustomSdistCommand,
}
try:
from wheel.bdist_wheel import bdist_wheel
class CustomBdistWheelCommand(bdist_wheel):
"""Custom bdist_wheel command to force cancelling qiskit-terra wheel
creation."""
def run(self):
"""Do nothing so the command intentionally fails."""
_build_paneljs()
bdist_wheel.run(self)
_COMMANDS['bdist_wheel'] = CustomBdistWheelCommand
except Exception:
pass
########## dependencies ##########
install_requires = [
'bokeh >=2.0.0',
'param >=1.9.3',
'pyviz_comms >=0.7.4',
'markdown',
'tqdm',
'pyct >=0.4.4'
]
_recommended = [
'notebook >=5.4',
'holoviews >=1.13.2',
'matplotlib',
'pillow',
'plotly'
]
_tests = [
'flake8',
'parameterized',
'pytest',
'scipy',
'nbsmoke >=0.2.0',
'pytest-cov',
'codecov',
'folium',
]
extras_require = {
'examples': [
'hvplot',
'plotly',
'altair',
'streamz',
'vega_datasets',
'vtk',
'scikit-learn',
'datashader',
'jupyter_bokeh',
'django',
'pyvista',
],
'tests': _tests,
'recommended': _recommended,
'doc': _recommended + [
'nbsite >=0.6.1',
'sphinx_holoviz_theme',
'selenium',
'phantomjs',
'graphviz',
'lxml',
]
}
extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
# Superset of what's in pyproject.toml (includes non-python
# dependencies). Also, pyproject.toml isn't supported by all tools
# anyway (e.g. older versions of pip, or conda - which also supports
# non-python dependencies). Note that setup_requires isn't used
# because it doesn't work well with pip.
extras_require['build'] = [
'param >=1.9.2',
'pyct >=0.4.4',
'setuptools >=30.3.0',
'bokeh >=2.0.0',
'pyviz_comms >=0.6.0',
# non-python dependency
'nodejs >=9.11.1',
]
setup_args = dict(
name='panel',
version=get_setup_version("panel"),
description='A high level app and dashboarding solution for Python.',
long_description=open('README.md').read() if os.path.isfile('README.md') else 'Consult README.md',
long_description_content_type="text/markdown",
author="HoloViz",
author_email="developers@holoviz.org",
maintainer="HoloViz",
maintainer_email="developers@holoviz.org",
platforms=['Windows', 'Mac OS X', 'Linux'],
license='BSD',
url='http://panel.holoviz.org',
cmdclass=_COMMANDS,
packages=find_packages(),
include_package_data=True,
classifiers=[
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Legal Industry",
"Intended Audience :: Other Audience",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Office/Business",
"Topic :: Office/Business :: Financial",
"Topic :: Software Development :: Libraries"],
python_requires=">=3.6",
entry_points={
'console_scripts': [
'panel = panel.cli:main'
]},
install_requires=install_requires,
extras_require=extras_require,
tests_require=extras_require['tests']
)
if __name__ == "__main__":
example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'panel', 'examples')
if 'develop' not in sys.argv and 'egg_info' not in sys.argv:
pyct.build.examples(example_path, __file__, force=True)
setup(**setup_args)
if os.path.isdir(example_path):
shutil.rmtree(example_path)
avg_line_length: 27.837838 | max_line_length: 102 | alphanum_fraction: 0.628479
|
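The get_setup_version helper in the setup.py sample above falls back to reading a JSON `.version` file when param is unavailable. A minimal sketch of that fallback path, assuming a throwaway temporary directory stands in for the repository checkout (the version string below is made up):

```python
import json
import os
import tempfile

# Build a throwaway layout that mimics <repo>/panel/.version (assumption for the demo)
tmp = tempfile.mkdtemp()
os.makedirs(os.path.join(tmp, "panel"), exist_ok=True)
with open(os.path.join(tmp, "panel", ".version"), "w") as fh:
    json.dump({"version_string": "0.0.0+example"}, fh)

def version_from_file(basepath, reponame):
    # Same lookup get_setup_version falls back to when param>=1.6.0 is not installed
    with open(os.path.join(basepath, reponame, ".version")) as fh:
        return json.load(fh)["version_string"]

print(version_from_file(tmp, "panel"))  # -> 0.0.0+example
```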
hexsha: 43a37d1c5a2bbfe7041983dbe4f46ac15a2006d5 | size: 457 | ext: py | lang: Python
repo path: Exercicios/ex062.py | repo name: MateusBarboza99/Python-03- | head hexsha: 9c6df88aaa8ba83d385b92722ed1df5873df3a77 | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
print('Arithmetic progression generator')
print('-=' * 10)
primeiro = int(input('First term: '))
razão = int(input('Common difference: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
total += mais
while cont <= total:
print('{} --> '.format(termo), end='')
termo += razão
cont += 1
print('PAUSE..')
mais = int(input('How many more terms do you want to show? '))
print('Progression finished with {} terms shown.'.format(total))
avg_line_length: 24.052632 | max_line_length: 69 | alphanum_fraction: 0.592998
|
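As a side note to the ex062.py sample above, each printed term follows the usual arithmetic-progression formula term_n = first + (n - 1) * difference. A small non-interactive check with arbitrary numbers:

```python
# First term, common difference and number of terms are arbitrary example values
primeiro, razao, n_terms = 2, 3, 5
termos = [primeiro + i * razao for i in range(n_terms)]
print(termos)  # [2, 5, 8, 11, 14]
```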
hexsha: 2fe214d823303ef7fcc08fb11bad9e49402a21bc | size: 2,862 | ext: py | lang: Python
repo path: vaetc/models/dipvae.py | repo name: ganmodokix/vaetc | head hexsha: 866b79677b4f06603203376d967989dedadbffae | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
from typing import Optional, Tuple
import math
import torch
from .utils import detach_dict
from vaetc.network.reparam import reparameterize
from vaetc.network.losses import neglogpxz_gaussian, kl_gaussian
from .vae import VAE
def cov(x: torch.Tensor) -> torch.Tensor:
""" Sample covariance.
Args:
x (torch.Tensor): shape (B, L)
Returns:
torch.Tensor: shape (L, L)
"""
# E[x x^T]
exxt = torch.mean(x[:,None,:] * x[:,:,None], dim=0)
# E[x] E[x]^T
ex = torch.mean(x, dim=0)
exext = ex[:,None] * ex[None,:]
return exxt - exext
def dip_losses(cov_matrix: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# cov_matrix: (L, L)
diag_part = torch.diagonal(cov_matrix) # (L, )
on_diag = torch.diagflat(diag_part) # (L, L)
off_diag = cov_matrix - on_diag # (L, L)
return (off_diag ** 2).sum(), ((diag_part - 1) ** 2).sum()
def dip_i_losses(mean: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
cov_mean = cov(mean)
return dip_losses(cov_mean)
def dip_ii_losses(mean: torch.Tensor, logvar: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
cov_mean = cov(mean)
cov_eps = logvar.exp().mean(dim=0).diagflat()
return dip_losses(cov_mean + cov_eps)
class DIPVAEI(VAE):
""" DIP-VAE-I
[Kumar+, 2018 (https://openreview.net/forum?id=H1kG7GZAW)] """
def __init__(self, hyperparameters: dict):
super().__init__(hyperparameters)
self.ld = float(hyperparameters["ld"])
self.lod = float(hyperparameters["lod"])
def loss(self, x, z, mean, logvar, x2, progress: Optional[float] = None):
# Losses
loss_ae = torch.mean(neglogpxz_gaussian(x, x2))
loss_reg = torch.mean(kl_gaussian(mean, logvar))
loss_lod, loss_ld = dip_i_losses(mean)
# Total loss
loss = loss_ae + loss_reg + loss_lod * self.lod + loss_ld * self.ld
return loss, detach_dict({
"loss": loss,
"loss_ae": loss_ae,
"loss_reg": loss_reg,
"loss_lod": loss_lod,
"loss_ld": loss_ld,
})
class DIPVAEII(DIPVAEI):
""" DIP-VAE-II
[Kumar+, 2018 (https://openreview.net/forum?id=H1kG7GZAW)] """
def __init__(self, hyperparameters: dict):
super().__init__(hyperparameters)
def loss(self, x, z, mean, logvar, x2, progress: Optional[float] = None):
# Losses
loss_ae = torch.mean(neglogpxz_gaussian(x, x2))
loss_reg = torch.mean(kl_gaussian(mean, logvar))
loss_lod, loss_ld = dip_ii_losses(mean, logvar)
# Total loss
loss = loss_ae + loss_reg + loss_lod * self.lod + loss_ld * self.ld
return loss, detach_dict({
"loss": loss,
"loss_ae": loss_ae,
"loss_reg": loss_reg,
"loss_lod": loss_lod,
"loss_ld": loss_ld,
})
avg_line_length: 26.5 | max_line_length: 78 | alphanum_fraction: 0.601328
|
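The cov and dip_losses helpers in the dipvae.py sample above decompose the sample covariance of the latent means into an off-diagonal penalty and a diagonal penalty. A minimal sketch of that decomposition, assuming PyTorch is installed (the batch and latent sizes below are arbitrary, not from the repository):

```python
import torch

def sample_cov(x: torch.Tensor) -> torch.Tensor:
    # E[x x^T] - E[x] E[x]^T over the batch dimension, as in cov() above
    ex = x.mean(dim=0)
    return (x[:, :, None] * x[:, None, :]).mean(dim=0) - ex[:, None] * ex[None, :]

z_mean = torch.randn(128, 8)            # (batch, latent) posterior means
c = sample_cov(z_mean)
diag = torch.diagonal(c)
off_diag = c - torch.diagflat(diag)
loss_od = (off_diag ** 2).sum()         # pushes latent dimensions toward decorrelation
loss_d = ((diag - 1) ** 2).sum()        # pushes per-dimension variances toward 1
print(loss_od.item(), loss_d.item())
```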
hexsha: 2f8f91014aa0aa7b77fa32395798a6e03dd451eb | size: 1,261 | ext: py | lang: Python
repo path: Blog-Language-Two-Site/config/urls.py | repo name: Mrsiahkhanie/Blog-Google-Login | head hexsha: 2855d6f62d8fddabecce37ae16de4e69bfec65e2 | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.i18n import i18n_patterns
from SlideShow.views import change_lang, logout_views
urlpatterns = [
path('change_lang', change_lang, name='change_lang'),
]
urlpatterns += i18n_patterns(
path('admin/', admin.site.urls),
path('', include("SlideShow.urls")),
path('logout/', logout_views, name="logout"),
path('', include('social_django.urls', namespace='social')),
)
from django.conf import settings
from django.conf.urls.static import static
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
avg_line_length: 35.027778 | max_line_length: 77 | alphanum_fraction: 0.727994
|
hexsha: 81e2c2e912abf56338a4fa62b22d6a1f7c05784f | size: 3,684 | ext: py | lang: Python
repo path: app/user.py | repo name: tlh45342/tradinghook | head hexsha: bd8a3bf31360a0d8dc0677b548c9de84604861b1 | licenses: ["Apache-2.0"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: 1 | forks events: 2021-09-22T20:04:55.000Z/2021-09-22T20:04:55.000Z
content:
from flask import render_template, request, flash, session, url_for, redirect
from flask_login import LoginManager, login_user, logout_user, login_required, current_user, UserMixin
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from app import app, login_manager
import sqlite3
import logging
import config as CONFIG
class User(UserMixin):
def __init__(self, id, email, password):
self.id = id
self.email = email
self.password = password
self.authenticated = False
def is_anonymous(self):
return False
def is_authenticated(self):
return self.authenticated
def is_active(self):
return True
def get_id(self):
return self.id
class LoginForm(FlaskForm):
username = StringField('Username')
password = PasswordField('Password')
submit = SubmitField('Submit')
# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
#print("load_user:")
conn = sqlite3.connect(CONFIG.DATABASE)
curs = conn.cursor()
query = """SELECT * from "users" where "id" = "{0}";""".format(userid)
#print("query:", query)
curs.execute(query)
row = curs.fetchone() #return class tuple
#print("returned row", row)
#print(type(row))
if row is None:
#print("user not found:")
return None
else:
#print("user found:")
#print("row0:",row[0])
return User(row[0], row[1], row[2])
@app.route("/login", methods=['GET','POST'])
def login():
#logging.info("login()")
form = LoginForm()
if request.method == "GET":
#logging.info("GET")
#query_string = request.query_string
username = request.args.get('username')
#logging.info("username:")
#logging.info(username)
password = request.args.get('password')
#logging.info("password:")
#logging.info(password)
#logging.info("querystring:")
#logging.info(query_string)
if username != None:
if (len(username) != 0) and (len(password) != 0):
#logging.info("login()[inside]")
conn = sqlite3.connect(CONFIG.DATABASE)
cursor = conn.cursor()
select = cursor.execute("""SELECT * FROM "users" where "username" = "{0}";""".format(username))
try:
row = list(cursor.fetchone())
logging.info(row)
if (username == row[1]) and (password == row[2]):
id = row[0]
user = User(id,username,password)
current_user.authenticated = True
# print("x:", current_user.authenticated)
login_user(user)
cursor.close()
conn.close()
return redirect('/')
else:
return redirect('/')
except Exception as e:
logging.info("exception at login (ERR:1234):")
cursor.close()
conn.close()
return redirect('/')
else:
return render_template('login.html',title='Login', form=form)
@app.route('/logout', methods=['GET','POST'])
def logout():
if current_user.is_authenticated:
logout_user()
return redirect('/')
avg_line_length: 34.111111 | max_line_length: 112 | alphanum_fraction: 0.536645
|
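The load_user and login functions in the user.py sample above interpolate user input straight into SQL strings. As a hedged aside (not part of the repository), the same lookup can be written with sqlite3 placeholders; the in-memory database and the example row below are assumptions for illustration:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT, password TEXT)")
conn.execute("INSERT INTO users VALUES (1, 'alice', 'secret')")

def find_user(conn, username):
    # The ? placeholder lets sqlite3 escape the value instead of string formatting
    cur = conn.execute(
        "SELECT id, username, password FROM users WHERE username = ?", (username,))
    return cur.fetchone()

print(find_user(conn, "alice"))  # (1, 'alice', 'secret')
```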
hexsha: 31762c1140d0ae988640f72c45e06d2a8324e3a7 | size: 323 | ext: py | lang: Python
repo path: ServerComponent/venv/Lib/site-packages/rsrc/conf/__init__.py | repo name: CDU55/FakeNews | head hexsha: 707bd48dd78851081d98ad21bbdadfc2720bd644 | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: 37 | issues events: 2020-10-20T08:30:53.000Z/2020-12-22T13:15:45.000Z
max_forks_count: 1 | forks events: 2020-10-19T14:55:23.000Z/2020-10-19T14:55:23.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from easyconfig import Config, envvar_object
from . import default_settings
settings = Config()
settings.from_object(default_settings)
# Override default settings if `RESOURCE_SETTINGS_MODULE` is given
settings.from_object(envvar_object('RESOURCE_SETTINGS_MODULE', True))
avg_line_length: 23.071429 | max_line_length: 69 | alphanum_fraction: 0.789474
|
hexsha: 7da6a8e7b32411e6cc012fab0bdf7cab1b9afed6 | size: 3,437 | ext: py | lang: Python
repo path: anime_downloader/sites/anime8.py | repo name: shishirjha/anime-downloader | head hexsha: c9297119883b2f3d1420fe302c4d01e654153645 | licenses: ["Unlicense"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
import logging
import re
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class Anime8(Anime, sitename = 'anime8'):
sitename = 'anime8'
@classmethod
def search(cls, query):
soup = helpers.soupify(helpers.get('https://anime8.ru/Search/', params={'s': query}).text)
results = soup.select('div.ml-item > a')
search_results = [
SearchResult(
title = i.find('h2').text,
url = i['href'],
meta_info = {
'version_key_subbed':'(Sub)',
'version_key_dubbed':'(Dub)'
})
for i in results
]
return search_results
def _scrape_episodes(self):
"""
Because of how the website is built,
the only way to access the episodes is by going to the last episode available
thats why im making two requests here.
"""
link = helpers.soupify(helpers.get(self.url).text).select_one('div#mv-info > a')['href']
soup = helpers.soupify(helpers.get(link).text)
eps = soup.select('a[class*="btn-eps first-ep last-ep"]')
eps = [x.get('href') for x in eps]
# Separating normal episodes from the special episodes
correct_eps = []
special_eps = []
special_seperator = ['-Preview', '-Special']
for episode in eps:
ep_text = episode.split('/')[-1].split('?')[0]  # Getting the episode type from the url
# Only "The God of High School" has a sneak peek episode, and it is broken in the first 10 seconds
if '-Sneak-Peak' in ep_text:
continue
# Here the special episodes are added to a separate list
if ep_text in special_seperator:
special_eps.append(episode)
# Here the normal episodes are added to the correct_eps list
else:
correct_eps.append(episode)
# If configured to do so it will add all the special eps to the end of the list
if self.config['include_special_eps']:
correct_eps.extend(special_eps)
return correct_eps
def _scrape_metadata(self):
soup = helpers.soupify(helpers.get(self.url))
self.title = soup.select('div.thumb.mvic-thumb > img')[0]['alt']
class Anime8Episode(AnimeEpisode, sitename='anime8'):
def _get_sources(self):
resp = helpers.get(self.url)
# Gets the ctk and id from the page used for a post request.
ctk = re.search(r"ctk\s+=\s+'(.*)?';", resp.text).group(1)
_id = re.search(r"episode_id\s*=\s*([^;]*)", resp.text).group(1)
logger.info('ctk: {}'.format(ctk))
logger.info('id: {}'.format(_id))
# The post request returns an embed.
resp = helpers.post("https://anime8.ru/ajax/anime/load_episodes_v2?s=fserver", data = {"episode_id": _id, "ctk": ctk})
# Gets the real embed url. Json could be used on the post request, but this is probably more reliable.
# Skips if no episode found.
if not resp.json().get('status'):
return ''
embed = re.search(r"iframe\s*src.*?\"([^\"]*)", resp.text).group(1).replace('\\','')
return [('streamx', embed)]
avg_line_length: 37.769231 | max_line_length: 127 | alphanum_fraction: 0.573465
|
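The _get_sources method in the anime8.py sample above pulls ctk and episode_id out of the page with two regular expressions before posting for the embed. A toy illustration of that extraction step; the HTML fragment below is fabricated, only the regex patterns mirror the snippet:

```python
import re

# Fabricated page fragment standing in for the real response text
page = "var ctk = 'abc123'; var episode_id = 42;"
ctk = re.search(r"ctk\s+=\s+'(.*)?';", page).group(1)
episode_id = re.search(r"episode_id\s*=\s*([^;]*)", page).group(1)
print(ctk, episode_id)  # abc123 42
```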
hexsha: 6c3cffd3cb203ee5abf430c84ca98d3a4b7b71c2 | size: 2,957 | ext: py | lang: Python
repo path: go/apps/http_api_nostream/view_definition.py | repo name: lynnUg/vumi-go | head hexsha: 852f906c46d5d26940bd6699f11488b73bbc3742 | licenses: ["BSD-3-Clause"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
from django import forms
from go.conversation.view_definition import (
ConversationViewDefinitionBase, EditConversationView)
from go.apps.http_api_nostream.definition import DEFAULT_METRIC_STORE
class TokenForm(forms.Form):
api_tokens = forms.CharField(
help_text='The access token for this HTTP Conversation.',
required=True)
ignore_messages = forms.BooleanField(
help_text='Ignore messages instead of forwarding them.',
required=False)
push_message_url = forms.CharField(
help_text='The URL to forward messages to via HTTP POST.',
required=False)
ignore_events = forms.BooleanField(
help_text='Ignore events instead of forwarding them.',
required=False)
push_event_url = forms.CharField(
help_text='The URL to forward events to via HTTP POST.',
required=False)
metric_store = forms.CharField(
help_text='Which store to publish metrics to.',
required=False)
def clean(self):
cleaned_data = super(TokenForm, self).clean()
if not cleaned_data['ignore_messages']:
if not cleaned_data['push_message_url']:
self._errors['push_message_url'] = self.error_class([
u'This field is required unless messages are ignored.'])
del cleaned_data['push_message_url']
if not cleaned_data['ignore_events']:
if not cleaned_data['push_event_url']:
self._errors['push_event_url'] = self.error_class([
u'This field is required unless events are ignored.'])
del cleaned_data['push_event_url']
return cleaned_data
@staticmethod
def initial_from_config(data):
data.setdefault('api_tokens', [])
return {
'api_tokens': (data['api_tokens'][0]
if data['api_tokens'] else None),
'push_message_url': data.get('push_message_url', None),
'push_event_url': data.get('push_event_url', None),
'metric_store': data.get('metric_store', DEFAULT_METRIC_STORE),
'ignore_events': data.get('ignore_events', False),
'ignore_messages': data.get('ignore_messages', False),
}
def to_config(self):
data = self.cleaned_data
return {
'api_tokens': [data['api_tokens']],
'push_message_url': data['push_message_url'] or None,
'push_event_url': data['push_event_url'] or None,
'metric_store': data.get('metric_store') or DEFAULT_METRIC_STORE,
'ignore_events': data.get('ignore_events', False),
'ignore_messages': data.get('ignore_messages', False),
}
class EditHttpApiNoStreamView(EditConversationView):
edit_forms = (
('http_api_nostream', TokenForm),
)
class ConversationViewDefinition(ConversationViewDefinitionBase):
edit_view = EditHttpApiNoStreamView
avg_line_length: 37.43038 | max_line_length: 77 | alphanum_fraction: 0.645587
|
hexsha: 6892d9f67b4bb5347e9fa8961514ec09e8044b1e | size: 5,109 | ext: py | lang: Python
repo path: Python/Tags_Scraper/Fetch_Tags.py | repo name: iamakkkhil/Rotten-Scripts | head hexsha: 116ae502271d699db88add5fd1cf733d01134b7d | licenses: ["MIT"]
max_stars_count: 5 | stars events: 2021-08-21T14:33:01.000Z/2021-08-28T16:57:48.000Z
max_issues_count: 3 | issues events: 2021-07-30T19:35:22.000Z/2021-08-05T18:37:15.000Z
max_forks_count: 1 | forks events: 2021-07-31T13:46:21.000Z/2021-07-31T13:46:21.000Z
content:
from bs4 import BeautifulSoup
import requests
import os
import urllib3
""" Fetch_Tags module's task is to scrape through the the websites and extract informtion from particular tags
from various websites and count the frequency of keywords used.
"""
class FetchTags:
"""FetchTags class has Constructor, get_results as method with with the Main.py will interact.
Other methods with which the internal methods will interact are get_html,get_keywords, save_file and count_frequency.
"""
def __init__(self, ListOfUrl):
"""__init__ method takes ListOfUrl generated by SearchResuts module"""
self.ListOfUrl = ListOfUrl
self.url = None
self.usr_agent = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36"
}
self.directory = os.getcwd()
self.alt_dict = {}
self.title_dict = {}
self.h2_dict = {}
self.h3_dict = {}
def get_results(self):
"""get_results method returns 4 dictionaries with keywords as keys and their frecuency as value"""
i = 1
for url in self.ListOfUrl:
html = self.get_html(url)
if html:
self.get_keywords(html)
print("Retrieved query no : {}".format(i))
i += 1
else:
continue
return self.alt_dict, self.title_dict, self.h2_dict, self.h3_dict
def get_html(self, url):
"""get_html method returns the html of the url passed"""
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
response = requests.get(url, headers=self.usr_agent, verify=False)
response.raise_for_status()
return response.text
except:
return None
def get_keywords(self, html):
"""get_keywords returns the dictionary of frewuency of keywords used in title tag, header tags and alternative tag"""
soup = BeautifulSoup(html, "html.parser")
# print(soup.prettify())
# FetchTags.save_file(soup.prettify(), self.directory)
# meta = soup.find_all('meta')
h1 = []
h2 = []
h3 = []
title = soup.find("title")
if title:
title = title.get_text().strip()
header2 = soup.find_all("h2")
if header2:
for ele in header2:
if ele:
h2.append(ele.get_text().strip())
header3 = soup.find_all("h3")
if header3:
for ele in header3:
if ele:
h3.append(ele.get_text().strip())
alt = []
for div in soup.find_all("div"):
for img in div.find_all("img", alt=True):
if img["alt"] not in alt:
alt.append(img["alt"])
content = [alt, title, h1, h2, h3]
# self.save_file(content, self.directory)
self.count_frequency(alt, title, h2, h3)
def save_file(self, content, address):
"""this method is for testing purpose only and it saves the content of html file"""
alternative_text_list = content[0]
content = content[1::]
if os.path.exists(address + r"/webpage.html"):
os.remove(address + r"/webpage.html")
fp = open(address + r"/webpage.html", "w")
for line in content:
fp.write(str(line) + "\n\n")
for text in alternative_text_list:
fp.write(str(text) + "\n\n")
fp.close()
def count_frequency(self, alt_tag_list, title, h2_list, h3_list):
"""count_frequency method counts the frequency of keywords in different tags and uodates their respective dictionary"""
special_characteres = "-|"
if len(alt_tag_list) != 0:
for data in alt_tag_list:
l = data.split()
for word in l:
self.alt_dict[word] = self.alt_dict.get(word, 0) + 1
if title:
for word in title.split():
if word not in special_characteres:
self.title_dict[word] = self.title_dict.get(word, 0) + 1
if len(h2_list) != 0:
for line in h2_list:
if line != None:
for word in line.split():
if word not in special_characteres:
self.h2_dict[word] = self.h2_dict.get(word, 0) + 1
if len(h3_list) != 0:
for line in h3_list:
if line != None:
for word in line.split():
if word not in special_characteres:
self.h3_dict[word] = self.h3_dict.get(word, 0) + 1
if __name__ == "__main__":
links = [
"http://uehy.mcminstrument.it/scrape-google-search-results-python.html",
"http://iduphul.gpkztwwz.site/web-scrape-google-search-results-python.html",
]
obj = FetchTags(links)
html = obj.get_html("https://ahrefs.com/blog/image-seo/")
obj.get_keywords(html)
avg_line_length: 36.755396 | max_line_length: 127 | alphanum_fraction: 0.571345
|
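count_frequency in the Fetch_Tags.py sample above keeps one plain dict per tag and increments counts by hand. As a hedged side note (not part of the script), the same bookkeeping can be done with collections.Counter; the input lines below are made up:

```python
from collections import Counter

def count_words(lines, skip=("-", "|")):
    # Tally words across all lines, skipping the same separator characters as the script
    counts = Counter()
    for line in lines:
        counts.update(word for word in line.split() if word not in skip)
    return counts

print(count_words(["image seo tips", "seo for images"]))
# Counter({'seo': 2, 'image': 1, 'tips': 1, 'for': 1, 'images': 1})
```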
hexsha: 700947169d38cdd654f2808f7a8b85fa51ffbfd0 | size: 1,951 | ext: py | lang: Python
repo path: vega/datasets/transforms/MaskTransform.py | repo name: zjzh/vega | head hexsha: aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | licenses: ["Apache-2.0"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a class for MaskTransform."""
import numpy as np
import mmcv
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class MaskTransform(object):
"""Mask tramsform method, which contains.
1. resize masks to expected size and stack to a single array
2. flip the masks (if needed)
3. pad the masks (if needed)
"""
def __call__(self, masks, pad_shape, scale_factor, flip=False):
"""Call function of MaskTransform.
:param masks: mask image
:type masks: ndarray
:param pad_shape: (height, width)
:type pad_shape: tuple
:param scale_factor: the scale factor according to the image tramsform
:type scale_factor: float
:param flip: whether to flop or not, defaults to False
:type flip: bool
:return: the mask image after transform
:rtype: ndarray
"""
masks = [
mmcv.imrescale(mask, scale_factor, interpolation='nearest')
for mask in masks
]
if flip:
masks = [mask[:, ::-1] for mask in masks]
padded_masks = [
mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
]
padded_masks = np.stack(padded_masks, axis=0)
return padded_masks
avg_line_length: 34.22807 | max_line_length: 78 | alphanum_fraction: 0.665812
|
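The MaskTransform docstring above lists three steps: resize, flip, and pad. A numpy-only sketch of the flip and pad steps (the mmcv imrescale resize is omitted, and the shapes below are toy values, not taken from the repository):

```python
import numpy as np

masks = [np.ones((4, 5), dtype=np.uint8), np.zeros((4, 5), dtype=np.uint8)]
pad_shape = (6, 8)

flipped = [m[:, ::-1] for m in masks]  # horizontal flip, as in the flip branch above
padded = [
    # pad each mask on the bottom/right with zeros up to pad_shape
    np.pad(m, ((0, pad_shape[0] - m.shape[0]), (0, pad_shape[1] - m.shape[1])))
    for m in flipped
]
stacked = np.stack(padded, axis=0)
print(stacked.shape)  # (2, 6, 8)
```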
hexsha: 0c50d3abe8c82dc7982a80203790ad749bf8e01c | size: 3,482 | ext: py | lang: Python
repo path: minitests/timing_loads/span2/make_loads.py | repo name: Keno/prjtrellis | head hexsha: 3311e6d814e0001c8785d6d77a4c93e327875b6d | licenses: ["ISC"]
max_stars_count: 256 | stars events: 2018-03-05T00:28:46.000Z/2022-03-04T22:33:29.000Z
max_issues_count: 70 | issues events: 2018-03-12T21:55:02.000Z/2020-06-22T12:06:08.000Z
max_forks_count: 68 | forks events: 2018-03-12T21:05:01.000Z/2021-03-14T21:08:33.000Z
content:
#!/usr/bin/env python3
import diamond
from string import Template
import re
device = "LFE5U-45F"
ncl = """
::FROM-WRITER;
design top
{
device
{
architecture sa5p00;
device LFE5U-25F;
package CABGA381;
performance "8";
}
comp SLICE_0
[,,,,A0,B0,D0,C0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,]
{
logical
{
cellmodel-name SLICE;
program "MODE:LOGIC "
"K0::H0=0 "
"K1::H1=0 "
"F0:F "
"F1:F ";
primitive K0 i3_0_lut;
primitive K1 i3_1_lut;
}
site R6C10A;
}
comp SLICE_1
[,,,,A0,B0,D0,C0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,]
{
logical
{
cellmodel-name SLICE;
program "MODE:LOGIC "
"K0::H0=0 "
"K1::H1=0 "
"F0:F "
"F1:F ";
primitive K0 i4_0_lut;
primitive K1 i4_1_lut;
}
site R6C10B;
}
comp SLICE_2
[,,,,A0,B0,D0,C0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,]
{
logical
{
cellmodel-name SLICE;
program "MODE:LOGIC "
"K0::H0=0 "
"K1::H1=0 "
"F0:F "
"F1:F ";
primitive K0 i5_0_lut;
primitive K1 i5_1_lut;
}
site R6C10C;
}
comp SLICE_3
[,,,,A0,B0,D0,C0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,]
{
logical
{
cellmodel-name SLICE;
program "MODE:LOGIC "
"K0::H0=0 "
"K1::H1=0 "
"F0:F "
"F1:F ";
primitive K0 i6_0_lut;
primitive K1 i6_1_lut;
}
site R6C10D;
}
comp SLICE_4
[,,,,A0,B0,D0,C0,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,]
{
logical
{
cellmodel-name SLICE;
program "MODE:LOGIC "
"K0::H0=0 "
"K1::H1=0 "
"F0:F "
"F1:F ";
primitive K0 i6_0_lut;
primitive K1 i6_1_lut;
}
site R6C12C;
}
signal q_c
{
signal-pins
// drivers
(SLICE_4, F1),
// loads
$loads;
route
$route;
}
}
"""
sinks = [
("(SLICE_0, A0)", "A0"),
("(SLICE_0, A1)", "A1"),
("(SLICE_1, A0)", "A2"),
("(SLICE_1, A1)", "A3"),
("(SLICE_2, A0)", "A4"),
("(SLICE_2, A1)", "A5"),
("(SLICE_3, A0)", "A6"),
("(SLICE_3, A1)", "A7"),
]
timings = []
for i in range(1, 9):
loads = [sinks[j][0] for j in range(i)]
route = ["R6C12_F5_SLICE.R6C12_F5",
"R6C12_F5.R6C11_H02W0701"]
for j in range(i):
destwire = sinks[j][1]
route.append("R6C11_H02W0701.R6C10_{}".format(destwire))
route.append("R6C10_{}.R6C10_{}_SLICE".format(destwire, destwire))
loads_txt = ", \n".join(loads)
route_txt = ", \n".join(route)
desfile = "fanout_{}.ncl".format(i)
with open(desfile, "w") as ouf:
ouf.write(Template(ncl).substitute(loads=loads_txt, route=route_txt))
diamond.run(device, desfile)
with open(desfile.replace("ncl", "twr"), "r") as twrf:
for line in twrf:
m = re.match(r"\s+([0-9.]+)ns\s+R6C12C\.F1 to R6C10A\.A0\s+", line)
if m:
timings.append(float(m.group(1)))
print("")
print("")
print("Fanout\tDelay")
for i in range(len(timings)):
print("{}\t{}".format(i+1, timings[i]))
avg_line_length: 22.464516 | max_line_length: 79 | alphanum_fraction: 0.428777
|
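make_loads.py above fills an NCL template with string.Template and then scrapes the routed delay out of the Diamond .twr report with a regex. A toy illustration of those two steps; the template fragment and the timing line below are fabricated for the example:

```python
import re
from string import Template

# Substitute the loads/route placeholders, as done for the full NCL text above
ncl = Template("signal q_c { signal-pins $loads; route $route; }")
print(ncl.substitute(loads="(SLICE_0, A0)", route="R6C12_F5.R6C11_H02W0701"))

# Parse a delay value out of a (made-up) timing-report line with the same pattern
line = "   1.234ns  R6C12C.F1 to R6C10A.A0  "
m = re.match(r"\s+([0-9.]+)ns\s+R6C12C\.F1 to R6C10A\.A0\s+", line)
print(float(m.group(1)))  # 1.234
```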
hexsha: e19985ea4f8c1788461eef36fa808f13858029e2 | size: 1,647 | ext: py | lang: Python
repo path: startup/ckpt.py | repo name: videoturingtest/vtt_qa_pipeline | head hexsha: 83efc672fad0ca2356caede6f0a3875f054037fc | licenses: ["MIT"]
max_stars_count: 1 | stars events: 2019-09-19T07:49:21.000Z/2019-09-19T07:49:21.000Z
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
import os
import torch
from torch import nn
import torch.nn.functional as F
from dataset import get_iterator
from model import get_model
from utils import get_dirname_from_args
def get_ckpt_path(args, epoch, loss):
ckpt_name = get_dirname_from_args(args)
ckpt_path = args.ckpt_path / ckpt_name
args.ckpt_path.mkdir(exist_ok=True)
ckpt_path.mkdir(exist_ok=True)
loss = '{:.4f}'.format(loss)
ckpt_path = ckpt_path / \
f'loss_{loss}_epoch_{epoch}.pickle'
return ckpt_path
def save_ckpt(args, epoch, loss, model, vocab):
print(f'saving epoch {epoch}')
dt = {
'args': args,
'epoch': epoch,
'loss': loss,
'model': model.state_dict(),
'vocab': vocab,
}
ckpt_path = get_ckpt_path(args, epoch, loss)
print(f"Saving checkpoint {ckpt_path}")
torch.save(dt, ckpt_path)
def get_model_ckpt(args):
ckpt_available = args.ckpt_name is not None
if ckpt_available:
name = f'{args.ckpt_name}'
name = f'{name}*' if not name.endswith('*') else name
ckpt_paths = sorted(args.ckpt_path.glob(f'{name}'), reverse=False)
assert len(ckpt_paths) > 0, f"no ckpt candidate for {args.ckpt_path / args.ckpt_name}"
ckpt_path = ckpt_paths[0] # monkey patch for choosing the best ckpt
print(f"loading from {ckpt_path}")
dt = torch.load(ckpt_path)
args.update(dt['args'])
vocab = dt['vocab']
iters, vocab = get_iterator(args, vocab)
model = get_model(args, vocab)
if ckpt_available:
model.load_state_dict(dt['model'])
return args, model, iters, vocab, ckpt_available
avg_line_length: 28.396552 | max_line_length: 94 | alphanum_fraction: 0.656952
|
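get_model_ckpt in the ckpt.py sample above sorts the glob results lexically and takes the first entry (the comment calls it a monkey patch). As a hedged variant, the loss can be parsed out of the filename pattern that get_ckpt_path writes; the paths below are made up, and best_checkpoint is not a function in the repository:

```python
import re
from pathlib import Path

def best_checkpoint(paths):
    # Filenames look like loss_{loss:.4f}_epoch_{epoch}.pickle, as written by get_ckpt_path
    def loss_of(p):
        m = re.match(r"loss_([0-9.]+)_epoch_\d+\.pickle$", p.name)
        return float(m.group(1)) if m else float("inf")
    return min(paths, key=loss_of)

candidates = [Path("loss_0.1234_epoch_3.pickle"), Path("loss_0.0987_epoch_7.pickle")]
print(best_checkpoint(candidates))  # loss_0.0987_epoch_7.pickle
```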
hexsha: 0cbd41e70335919f2c26b055e519a997dacbd91a | size: 812 | ext: py | lang: Python
repo path: kvaak/api/urls.py | repo name: terokoodaa/kvaak-backend | head hexsha: a0319d701a3501d0baca8d32497a0fe935811f83 | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
from django.conf.urls import url, include
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
from .views import species_list
from .views import species_detail
from .views import sightings_list
from .views import sighting_detail
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
# router.register(r'species', SpeciesViewSet)
urlpatterns = [
url(r'^species/$', species_list),
url(r'^species/(?P<pk>[0-9]+)$', species_detail),
url(r'^sightings/$', sightings_list),
url(r'^sightings/(?P<pk>[0-9]+)$', sighting_detail),
# url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
urlpatterns = format_suffix_patterns(urlpatterns)
avg_line_length: 32.48 | max_line_length: 82 | alphanum_fraction: 0.746305
|
hexsha: ab85b4ba425a5075dfabe82d7ae512a2bbbd18bb | size: 2,913 | ext: py | lang: Python
repo path: project/preprocessing/convert_squad_1.py | repo name: EstevaoUyra/CISS_Project | head hexsha: d78c2617148109f1f9a5dfe54c3771ca68adc0fd | licenses: ["MIT"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: 1 | forks events: 2019-07-13T15:48:11.000Z/2019-07-13T15:48:11.000Z
content:
import argparse
import json
import logging
import sys
class ParserWithUsage(argparse.ArgumentParser):
""" A custom parser that writes error messages followed by command line usage documentation."""
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def get_sentence_id_for_span(sentence_spans, span_start):
for index, (_, end) in enumerate(sentence_spans):
if span_start <= end:
return index
def tokenize_and_split_sentences(text: str):
import nltk
sentence_tokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()
sentence_spans = list(sentence_tokenizer.span_tokenize(text))
sentences = sentence_tokenizer.tokenize(text)
value_to_return = {"sentence_spans": sentence_spans, "sentences": sentences}
return value_to_return
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO,
datefmt='%m/%d/%Y %H:%M:%S')
parser = ParserWithUsage()
parser.description = "Reads SQuAD 1 and converts it to a JSON file containing question, sentence, is_answer"
parser.add_argument("--input", help="Input SQuAD JSON file", required=True)
parser.add_argument("--output", help="Output file", required=True)
args = parser.parse_args()
logging.info("STARTED")
output_data = []
with open(args.input, "r") as original_file:
original_data = json.load(original_file)["data"]
for doc_number, current_document in enumerate(original_data):
for paragraph_id, current_paragraph in enumerate(current_document["paragraphs"]):
processed_context = tokenize_and_split_sentences(current_paragraph["context"])
for current_question in current_paragraph["qas"]:
question_text = current_question["question"]
sentences_containing_answer = set(
[get_sentence_id_for_span(processed_context["sentence_spans"], a["answer_start"]) for a in
current_question["answers"]])
for index, s in enumerate(processed_context["sentences"]):
example = {"question": question_text, "sentence": s,
"label": int(index in sentences_containing_answer),
"paragraph_id": paragraph_id,
"doc_number": doc_number}
output_data.append(example)
if len(output_data) % 1000 == 0:
logging.info("Processed {} documents".format(len(output_data)))
logging.info("Writing to file {}".format(args.output))
with open(args.output, "w") as output_file:
json.dump(output_data, output_file, indent=4)
logging.info("DONE")
if __name__ == "__main__":
main()
avg_line_length: 42.217391 | max_line_length: 114 | alphanum_fraction: 0.636114
|
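get_sentence_id_for_span in the convert_squad_1.py sample above maps an answer's character offset to the index of the sentence whose span contains it. A minimal illustration with made-up spans:

```python
sentence_spans = [(0, 11), (12, 30), (31, 55)]  # (start, end) character offsets

def sentence_id(spans, answer_start):
    # Same scan as get_sentence_id_for_span: first sentence whose end covers the offset
    for index, (_, end) in enumerate(spans):
        if answer_start <= end:
            return index

print(sentence_id(sentence_spans, 20))  # -> 1 (the answer starts inside the second sentence)
```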
hexsha: 775d3b3d3e0fa773182298778f80f8df2da2e46b | size: 35,459 | ext: py | lang: Python
repo path: library/python/pytest/plugins/ya.py | repo name: re9ulus/catboost | head hexsha: 040efc31707c83e62382b8d4396a1ba9ee36acb5 | licenses: ["Apache-2.0"]
max_stars_count: null | stars events: null/null
max_issues_count: null | issues events: null/null
max_forks_count: null | forks events: null/null
content:
# coding: utf-8
import re
import sys
import os
import logging
import fnmatch
import json
import time
import collections
import py
import pytest
import _pytest
import _pytest.mark
import signal
try:
import resource
except ImportError:
resource = None
try:
import library.python.pytest.yatest_tools as tools
except ImportError:
# fallback for pytest script mode
import yatest_tools as tools
import yatest_lib.tools
import yatest_lib.external as canon
console_logger = logging.getLogger("console")
yatest_logger = logging.getLogger("ya.test")
_pytest.main.EXIT_NOTESTSCOLLECTED = 0
SHUTDOWN_REQUESTED = False
def configure_pdb_on_demand():
import signal
if hasattr(signal, "SIGUSR1"):
def on_signal(*args):
import pdb
pdb.set_trace()
signal.signal(signal.SIGUSR1, on_signal)
class TestMisconfigurationException(Exception):
pass
class CustomImporter(object):
def __init__(self, roots):
self._roots = roots
def find_module(self, fullname, package_path=None):
for path in self._roots:
full_path = self._get_module_path(path, fullname)
if os.path.exists(full_path) and os.path.isdir(full_path) and not os.path.exists(os.path.join(full_path, "__init__.py")):
open(os.path.join(full_path, "__init__.py"), "w").close()
return None
def _get_module_path(self, path, fullname):
return os.path.join(path, *fullname.split('.'))
class RunMode(object):
Run = "run"
List = "list"
class YaTestLoggingFileHandler(logging.FileHandler):
pass
def setup_logging(log_path, level=logging.DEBUG, *other_logs):
logs = [log_path] + list(other_logs)
root_logger = logging.getLogger()
for i in range(len(root_logger.handlers) - 1, -1, -1):
if isinstance(root_logger.handlers[i], YaTestLoggingFileHandler):
root_logger.handlers.pop(i)
root_logger.setLevel(level)
for log_file in logs:
file_handler = YaTestLoggingFileHandler(log_file)
log_format = '%(asctime)s - %(levelname)s - %(name)s - %(funcName)s: %(message)s'
file_handler.setFormatter(logging.Formatter(log_format))
file_handler.setLevel(level)
root_logger.addHandler(file_handler)
def pytest_addoption(parser):
parser.addoption("--build-root", action="store", dest="build_root", default="", help="path to the build root")
parser.addoption("--dep-root", action="append", dest="dep_roots", default=[], help="path to the dep build roots")
parser.addoption("--source-root", action="store", dest="source_root", default="", help="path to the source root")
parser.addoption("--data-root", action="store", dest="data_root", default="", help="path to the arcadia_tests_data root")
parser.addoption("--output-dir", action="store", dest="output_dir", default="", help="path to the test output dir")
parser.addoption("--python-path", action="store", dest="python_path", default="", help="path the canonical python binary")
parser.addoption("--valgrind-path", action="store", dest="valgrind_path", default="", help="path the canonical valgring binary")
parser.addoption("--test-filter", action="append", dest="test_filter", default=None, help="test filter")
parser.addoption("--test-file-filter", action="append", dest="test_file_filter", default=None, help="test file filter")
parser.addoption("--test-param", action="append", dest="test_params", default=None, help="test parameters")
parser.addoption("--test-log-level", action="store", dest="test_log_level", choices=["critical", "error", "warning", "info", "debug"], default="debug", help="test log level")
parser.addoption("--mode", action="store", choices=[RunMode.List, RunMode.Run], dest="mode", default=RunMode.Run, help="testing mode")
parser.addoption("--test-list-file", action="store", dest="test_list_file")
parser.addoption("--modulo", default=1, type=int)
parser.addoption("--modulo-index", default=0, type=int)
parser.addoption("--split-by-tests", action='store_true', help="Split test execution by tests instead of suites", default=False)
parser.addoption("--project-path", action="store", default="", help="path to CMakeList where test is declared")
parser.addoption("--build-type", action="store", default="", help="build type")
parser.addoption("--flags", action="append", dest="flags", default=[], help="build flags (-D)")
parser.addoption("--sanitize", action="store", default="", help="sanitize mode")
parser.addoption("--test-stderr", action="store_true", default=False, help="test stderr")
parser.addoption("--test-debug", action="store_true", default=False, help="test debug mode")
parser.addoption("--root-dir", action="store", default=None)
parser.addoption("--ya-trace", action="store", dest="ya_trace_path", default=None, help="path to ya trace report")
parser.addoption("--ya-version", action="store", dest="ya_version", default=0, type=int, help="allows to be compatible with ya and the new changes in ya-dev")
parser.addoption(
"--test-suffix", action="store", dest="test_suffix", default=None, help="add suffix to every test name"
)
parser.addoption("--gdb-path", action="store", dest="gdb_path", default="", help="path the canonical gdb binary")
parser.addoption("--collect-cores", action="store_true", dest="collect_cores", default=False, help="allows core dump file recovering during test")
parser.addoption("--sanitizer-extra-checks", action="store_true", dest="sanitizer_extra_checks", default=False, help="enables extra checks for tests built with sanitizers")
parser.addoption("--report-deselected", action="store_true", dest="report_deselected", default=False, help="report deselected tests to the trace file")
parser.addoption("--pdb-on-sigusr1", action="store_true", default=False, help="setup pdb.set_trace on SIGUSR1")
parser.addoption("--test-tool-bin", help="Path to test_tool")
def pytest_configure(config):
pytest.register_assert_rewrite('__tests__')
config.option.continue_on_collection_errors = True
# XXX Strip java contrib from dep_roots - it's python-irrelevant code,
# The number of such deps may lead to problems - see https://st.yandex-team.ru/DEVTOOLS-4627
config.option.dep_roots = [e for e in config.option.dep_roots if not e.startswith('contrib/java')]
config.from_ya_test = "YA_TEST_RUNNER" in os.environ
config.test_logs = collections.defaultdict(dict)
config.test_metrics = {}
context = {
"project_path": config.option.project_path,
"test_stderr": config.option.test_stderr,
"test_debug": config.option.test_debug,
"build_type": config.option.build_type,
"test_traceback": config.option.tbstyle,
"flags": config.option.flags,
"sanitize": config.option.sanitize,
}
config.ya = Ya(
config.option.mode,
config.option.source_root,
config.option.build_root,
config.option.dep_roots,
config.option.output_dir,
config.option.test_params,
context,
config.option.python_path,
config.option.valgrind_path,
config.option.gdb_path,
config.option.data_root,
)
config.option.test_log_level = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARN,
"info": logging.INFO,
"debug": logging.DEBUG,
}[config.option.test_log_level]
if not config.option.collectonly:
setup_logging(os.path.join(config.ya.output_dir, "run.log"), config.option.test_log_level)
config.current_item_nodeid = None
config.current_test_name = None
config.test_cores_count = 0
config.collect_cores = config.option.collect_cores
config.sanitizer_extra_checks = config.option.sanitizer_extra_checks
config.test_tool_bin = config.option.test_tool_bin
if config.sanitizer_extra_checks:
for envvar in ['LSAN_OPTIONS', 'ASAN_OPTIONS']:
if envvar in os.environ:
os.environ.pop(envvar)
if envvar + '_ORIGINAL' in os.environ:
os.environ[envvar] = os.environ[envvar + '_ORIGINAL']
config.coverage = None
cov_prefix = os.environ.get('PYTHON_COVERAGE_PREFIX')
if cov_prefix:
config.coverage_data_dir = os.path.dirname(cov_prefix)
try:
import coverage
except ImportError:
coverage = None
logging.exception("Failed to import coverage module - no coverage will be collected")
if coverage:
cov = coverage.Coverage(
data_file=cov_prefix,
concurrency=['multiprocessing', 'thread'],
auto_data=True,
branch=True,
# debug=['pid', 'trace', 'sys', 'config'],
)
config.coverage = cov
config.coverage.start()
logging.info("Coverage will be collected during testing. pid: %d", os.getpid())
if config.option.root_dir:
config.rootdir = config.invocation_dir = py.path.local(config.option.root_dir)
# Arcadia paths from the test DEPENDS section of CMakeLists.txt
sys.path.insert(0, os.path.join(config.option.source_root, config.option.project_path))
sys.path.extend([os.path.join(config.option.source_root, d) for d in config.option.dep_roots])
sys.path.extend([os.path.join(config.option.build_root, d) for d in config.option.dep_roots])
# Build root is required for correct import of protobufs, because imports are related to the root
# (like import devtools.dummy_arcadia.protos.lib.my_proto_pb2)
sys.path.append(config.option.build_root)
os.environ["PYTHONPATH"] = os.pathsep.join(os.environ.get("PYTHONPATH", "").split(os.pathsep) + sys.path)
if not config.option.collectonly:
if config.option.ya_trace_path:
config.ya_trace_reporter = TraceReportGenerator(config.option.ya_trace_path)
else:
config.ya_trace_reporter = DryTraceReportGenerator(config.option.ya_trace_path)
config.ya_version = config.option.ya_version
sys.meta_path.append(CustomImporter([config.option.build_root] + [os.path.join(config.option.build_root, dep) for dep in config.option.dep_roots]))
if config.option.pdb_on_sigusr1:
configure_pdb_on_demand()
if hasattr(signal, "SIGUSR2"):
signal.signal(signal.SIGUSR2, _smooth_shutdown)
def _smooth_shutdown(*args):
cov = pytest.config.coverage
if cov:
cov.stop()
pytest.exit("Smooth shutdown requested")
def _get_rusage():
return resource and resource.getrusage(resource.RUSAGE_SELF)
def _collect_test_rusage(item):
if resource and hasattr(item, "rusage"):
finish_rusage = _get_rusage()
ya_inst = pytest.config.ya
def add_metric(attr_name, metric_name=None, modifier=None):
if not metric_name:
metric_name = attr_name
if not modifier:
modifier = lambda x: x
if hasattr(item.rusage, attr_name):
ya_inst.set_metric_value(metric_name, modifier(getattr(finish_rusage, attr_name) - getattr(item.rusage, attr_name)))
for args in [
("ru_maxrss", "ru_rss", lambda x: x*1024), # to be the same as in util/system/rusage.cpp
("ru_utime",),
("ru_stime",),
("ru_ixrss", None, lambda x: x*1024),
("ru_idrss", None, lambda x: x*1024),
("ru_isrss", None, lambda x: x*1024),
("ru_majflt", "ru_major_pagefaults"),
("ru_minflt", "ru_minor_pagefaults"),
("ru_nswap",),
("ru_inblock",),
("ru_oublock",),
("ru_msgsnd",),
("ru_msgrcv",),
("ru_nsignals",),
("ru_nvcsw",),
("ru_nivcsw",),
]:
add_metric(*args)
def _get_item_tags(item):
tags = []
for key, value in item.keywords.items():
if isinstance(value, _pytest.mark.MarkInfo) or isinstance(value, _pytest.mark.MarkDecorator):
tags.append(key)
return tags
def pytest_runtest_setup(item):
item.rusage = _get_rusage()
pytest.config.test_cores_count = 0
pytest.config.current_item_nodeid = item.nodeid
class_name, test_name = tools.split_node_id(item.nodeid)
test_log_path = tools.get_test_log_file_path(pytest.config.ya.output_dir, class_name, test_name)
setup_logging(
os.path.join(pytest.config.ya.output_dir, "run.log"),
pytest.config.option.test_log_level,
test_log_path
)
pytest.config.test_logs[item.nodeid]['log'] = test_log_path
pytest.config.test_logs[item.nodeid]['logsdir'] = pytest.config.ya.output_dir
pytest.config.current_test_log_path = test_log_path
pytest.config.current_test_name = "{}::{}".format(class_name, test_name)
separator = "#" * 100
yatest_logger.info(separator)
yatest_logger.info(test_name)
yatest_logger.info(separator)
yatest_logger.info("Test setup")
test_item = CrashedTestItem(item.nodeid, pytest.config.option.test_suffix)
pytest.config.ya_trace_reporter.on_start_test_class(test_item)
pytest.config.ya_trace_reporter.on_start_test_case(test_item)
def pytest_runtest_teardown(item, nextitem):
yatest_logger.info("Test teardown")
def pytest_runtest_call(item):
yatest_logger.info("Test call")
def pytest_deselected(items):
config = pytest.config
if config.option.report_deselected:
for item in items:
deselected_item = DeselectedTestItem(item.nodeid, config.option.test_suffix)
config.ya_trace_reporter.on_start_test_class(deselected_item)
config.ya_trace_reporter.on_start_test_case(deselected_item)
config.ya_trace_reporter.on_finish_test_case(deselected_item)
config.ya_trace_reporter.on_finish_test_class(deselected_item)
@pytest.mark.trylast
def pytest_collection_modifyitems(items, config):
def filter_items(filters):
filtered_items = []
deselected_items = []
for item in items:
canonical_node_id = str(CustomTestItem(item.nodeid, pytest.config.option.test_suffix))
matched = False
for flt in filters:
if "::" not in flt and "*" not in flt:
flt += "*" # add support for filtering by module name
if canonical_node_id.endswith(flt) or fnmatch.fnmatch(tools.escape_for_fnmatch(canonical_node_id), tools.escape_for_fnmatch(flt)):
matched = True
if matched:
filtered_items.append(item)
else:
deselected_items.append(item)
config.hook.pytest_deselected(items=deselected_items)
items[:] = filtered_items
# XXX - check to be removed when tests for peerdirs don't run
for item in items:
if not item.nodeid:
item._nodeid = os.path.basename(item.location[0])
if config.option.test_file_filter:
filter_items([f.replace("/", ".") for f in config.option.test_file_filter])
if config.option.test_filter:
filter_items(config.option.test_filter)
modulo = config.option.modulo
if modulo > 1:
items[:] = sorted(items, key=lambda item: item.nodeid)
modulo_index = config.option.modulo_index
split_by_tests = config.option.split_by_tests
items_by_classes = {}
res = []
for item in items:
if "()" in item.nodeid and not split_by_tests:
class_name = item.nodeid.split("()", 1)[0]
if class_name not in items_by_classes:
items_by_classes[class_name] = []
res.append(items_by_classes[class_name])
items_by_classes[class_name].append(item)
else:
res.append([item])
shift = int((len(res) + modulo - 1) / modulo)
start = modulo_index * shift
end = start + shift
chunk_items = []
for classes_items in res[start:end]:
chunk_items.extend(classes_items)
items[:] = chunk_items
yatest_logger.info("Modulo %s tests are: %s", modulo_index, chunk_items)
if config.option.mode == RunMode.Run:
for item in items:
test_item = NotLaunchedTestItem(item.nodeid, config.option.test_suffix)
config.ya_trace_reporter.on_start_test_class(test_item)
config.ya_trace_reporter.on_start_test_case(test_item)
config.ya_trace_reporter.on_finish_test_case(test_item)
config.ya_trace_reporter.on_finish_test_class(test_item)
elif config.option.mode == RunMode.List:
tests = []
for item in items:
item = CustomTestItem(item.nodeid, pytest.config.option.test_suffix, item.keywords)
record = {
"class": item.class_name,
"test": item.test_name,
"tags": _get_item_tags(item),
}
tests.append(record)
if config.option.test_list_file:
with open(config.option.test_list_file, 'w') as afile:
json.dump(tests, afile)
# TODO prettyboy remove after test_tool release - currently it's required for backward compatibility
sys.stderr.write(json.dumps(tests))
def pytest_collectreport(report):
if not report.passed:
if hasattr(pytest.config, 'ya_trace_reporter'):
test_item = TestItem(report, None, pytest.config.option.test_suffix)
pytest.config.ya_trace_reporter.on_error(test_item)
else:
sys.stderr.write(yatest_lib.tools.to_utf8(report.longrepr))
def pytest_runtest_makereport(item, call):
def makereport(item, call):
when = call.when
duration = call.stop-call.start
keywords = item.keywords
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, _pytest._code.code.ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(pytest.skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo, style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured std%s %s" % (key, rwhen), content))
return _pytest.runner.TestReport(item.nodeid, item.location, keywords, outcome, longrepr, when, sections, duration)
def logreport(report, result):
test_item = TestItem(report, result, pytest.config.option.test_suffix)
if report.when == "call":
_collect_test_rusage(item)
pytest.config.ya_trace_reporter.on_finish_test_case(test_item)
elif report.when == "setup":
pytest.config.ya_trace_reporter.on_start_test_class(test_item)
if report.outcome != "passed":
pytest.config.ya_trace_reporter.on_start_test_case(test_item)
pytest.config.ya_trace_reporter.on_finish_test_case(test_item)
else:
pytest.config.ya_trace_reporter.on_start_test_case(test_item)
elif report.when == "teardown":
if report.outcome == "failed":
pytest.config.ya_trace_reporter.on_start_test_case(test_item)
pytest.config.ya_trace_reporter.on_finish_test_case(test_item)
pytest.config.ya_trace_reporter.on_finish_test_class(test_item)
rep = makereport(item, call)
if hasattr(call, 'result') and call.result:
result = call.result
if not pytest.config.from_ya_test:
ti = TestItem(rep, result, pytest.config.option.test_suffix)
tr = pytest.config.pluginmanager.getplugin('terminalreporter')
tr.write_line("{} - Validating canonical data is not supported when running standalone binary".format(ti), yellow=True, bold=True)
else:
result = None
# taken from arcadia/contrib/python/pytest/_pytest/skipping.py
try:
        # unittest special case, see setting of _unexpectedsuccess
if hasattr(item, '_unexpectedsuccess'):
if rep.when == "call":
# we need to translate into how pytest encodes xpass
rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
rep.outcome = "failed"
return rep
if not (call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception)):
evalxfail = getattr(item, '_evalxfail', None)
if not evalxfail:
return
if call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
if not item.config.getvalue("runxfail"):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
return rep
evalxfail = item._evalxfail
if not rep.skipped:
if not item.config.option.runxfail:
if evalxfail.wasvalid() and evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
return rep
else:
rep.outcome = "skipped"
elif call.when == "call":
rep.outcome = "failed"
else:
return rep
rep.wasxfail = evalxfail.getexplanation()
return rep
finally:
logreport(rep, result)
return rep
def get_formatted_error(report):
if isinstance(report.longrepr, tuple):
text = ""
for entry in report.longrepr:
text += colorize(entry)
else:
text = colorize(report.longrepr)
text = yatest_lib.tools.to_utf8(text)
return text
def colorize(longrepr):
# use default pytest colorization
if pytest.config.option.tbstyle != "short":
io = py.io.TextIO()
writer = py.io.TerminalWriter(file=io)
# enable colorization
writer.hasmarkup = True
if hasattr(longrepr, 'reprtraceback') and hasattr(longrepr.reprtraceback, 'toterminal'):
longrepr.reprtraceback.toterminal(writer)
return io.getvalue().strip()
return yatest_lib.tools.to_utf8(longrepr)
text = yatest_lib.tools.to_utf8(longrepr)
pos = text.find("E ")
if pos == -1:
return text
bt, error = text[:pos], text[pos:]
filters = [
# File path, line number and function name
(re.compile(r"^(.*?):(\d+): in (\S+)", flags=re.MULTILINE), r"[[unimp]]\1[[rst]]:[[alt2]]\2[[rst]]: in [[alt1]]\3[[rst]]"),
]
for regex, substitution in filters:
bt = regex.sub(substitution, bt)
return "{}[[bad]]{}".format(bt, error)
class TestItem(object):
def __init__(self, report, result, test_suffix):
self._result = result
self.nodeid = report.nodeid
self._class_name, self._test_name = tools.split_node_id(self.nodeid, test_suffix)
self._error = None
self._status = None
self._process_report(report)
self._duration = hasattr(report, 'duration') and report.duration or 0
self._keywords = getattr(report, "keywords", {})
def _process_report(self, report):
if report.longrepr:
self.set_error(report)
if hasattr(report, 'when') and report.when != "call":
self.set_error(report.when + " failed:\n" + self._error)
else:
self.set_error("")
if report.outcome == "passed":
self._status = 'good'
self.set_error("")
elif report.outcome == "skipped":
if hasattr(report, 'wasxfail'):
self._status = 'xfail'
self.set_error(report.wasxfail)
else:
self._status = 'skipped'
self.set_error(yatest_lib.tools.to_utf8(report.longrepr[-1]))
elif report.outcome == "failed":
if hasattr(report, 'wasxfail'):
self._status = 'xpass'
self.set_error("Test unexpectedly passed")
else:
self._status = 'fail'
@property
def status(self):
return self._status
def set_status(self, status):
self._status = status
@property
def test_name(self):
return tools.normalize_name(self._test_name)
@property
def class_name(self):
return tools.normalize_name(self._class_name)
@property
def error(self):
return self._error
def set_error(self, entry):
if isinstance(entry, _pytest.runner.BaseReport):
self._error = get_formatted_error(entry)
else:
self._error = "[[bad]]" + str(entry)
@property
def duration(self):
return self._duration
@property
def result(self):
if 'not_canonize' in self._keywords:
return None
return self._result
@property
def keywords(self):
return self._keywords
def __str__(self):
return "{}::{}".format(self.class_name, self.test_name)
class CustomTestItem(TestItem):
def __init__(self, nodeid, test_suffix, keywords=None):
self._result = None
self.nodeid = nodeid
self._class_name, self._test_name = tools.split_node_id(nodeid, test_suffix)
self._duration = 0
self._error = ""
self._keywords = keywords if keywords is not None else {}
class NotLaunchedTestItem(CustomTestItem):
def __init__(self, nodeid, test_suffix):
super(NotLaunchedTestItem, self).__init__(nodeid, test_suffix)
self._status = "not_launched"
class CrashedTestItem(CustomTestItem):
def __init__(self, nodeid, test_suffix):
super(CrashedTestItem, self).__init__(nodeid, test_suffix)
self._status = "crashed"
class DeselectedTestItem(CustomTestItem):
def __init__(self, nodeid, test_suffix):
super(DeselectedTestItem, self).__init__(nodeid, test_suffix)
self._status = "deselected"
class TraceReportGenerator(object):
def __init__(self, out_file_path):
self.File = open(out_file_path, 'w')
def on_start_test_class(self, test_item):
pytest.config.ya.set_test_item_node_id(test_item.nodeid)
self.trace('test-started', {'class': test_item.class_name.decode('utf-8') if sys.version_info[0] < 3 else test_item.class_name})
def on_finish_test_class(self, test_item):
pytest.config.ya.set_test_item_node_id(test_item.nodeid)
self.trace('test-finished', {'class': test_item.class_name.decode('utf-8') if sys.version_info[0] < 3 else test_item.class_name})
def on_start_test_case(self, test_item):
message = {
'class': yatest_lib.tools.to_utf8(test_item.class_name),
'subtest': yatest_lib.tools.to_utf8(test_item.test_name)
}
if test_item.nodeid in pytest.config.test_logs:
message['logs'] = pytest.config.test_logs[test_item.nodeid]
pytest.config.ya.set_test_item_node_id(test_item.nodeid)
self.trace('subtest-started', message)
def on_finish_test_case(self, test_item):
if test_item.result:
try:
result = canon.serialize(test_item.result[0])
except Exception as e:
yatest_logger.exception("Error while serializing test results")
test_item.set_error("Invalid test result: {}".format(e))
test_item.set_status("fail")
result = None
else:
result = None
message = {
'class': yatest_lib.tools.to_utf8(test_item.class_name),
'subtest': yatest_lib.tools.to_utf8(test_item.test_name),
'status': test_item.status,
'comment': self._get_comment(test_item),
'time': test_item.duration,
'result': result,
'metrics': pytest.config.test_metrics.get(test_item.nodeid),
'is_diff_test': 'diff_test' in test_item.keywords,
'tags': _get_item_tags(test_item),
}
if test_item.nodeid in pytest.config.test_logs:
message['logs'] = pytest.config.test_logs[test_item.nodeid]
self.trace('subtest-finished', message)
def on_error(self, test_item):
self.trace('suite_event', {"errors": [(test_item.status, self._get_comment(test_item))]})
@staticmethod
def _get_comment(test_item):
msg = yatest_lib.tools.to_utf8(test_item.error)
if not msg:
return ""
return msg + "[[rst]]"
def trace(self, name, value):
event = {
'timestamp': time.time(),
'value': value,
'name': name
}
data = json.dumps(event, ensure_ascii=False)
if sys.version_info[0] < 3 and isinstance(data, unicode):
data = data.encode("utf8")
self.File.write(data + '\n')
self.File.flush()
class DryTraceReportGenerator(TraceReportGenerator):
"""
Generator does not write any information.
"""
def __init__(self, *args, **kwargs):
pass
def trace(self, name, value):
pass
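# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original plugin): TraceReportGenerator
# writes one JSON object per line with the keys "timestamp", "value" and
# "name".  A minimal standalone reader for such a newline-delimited trace
# file could look like the helper below; the file name in the commented
# usage is hypothetical.
def read_trace_events(path):
    """Yield (name, value, timestamp) tuples from a ya-style trace file."""
    import json  # local import keeps the sketch self-contained

    with open(path) as afile:
        for line in afile:
            line = line.strip()
            if line:
                event = json.loads(line)
                yield event["name"], event["value"], event["timestamp"]
# Example usage (hypothetical path):
#   for name, value, ts in read_trace_events("ytest.report.trace"):
#       print(name, value)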
class Ya(object):
"""
Adds integration with ya, helps in finding dependencies
"""
def __init__(self, mode, source_root, build_root, dep_roots, output_dir, test_params, context, python_path, valgrind_path, gdb_path, data_root):
self._mode = mode
self._build_root = build_root
self._source_root = source_root or self._detect_source_root()
self._output_dir = output_dir or self._detect_output_root()
if not self._output_dir:
raise Exception("Run ya make -t before running test binary")
if not self._source_root:
logging.warning("Source root was not set neither determined, use --source-root to set it explicitly")
if not self._build_root:
if self._source_root:
self._build_root = self._source_root
else:
logging.warning("Build root was not set neither determined, use --build-root to set it explicitly")
if data_root:
self._data_root = data_root
elif self._source_root:
self._data_root = os.path.abspath(os.path.join(self._source_root, "..", "arcadia_tests_data"))
self._dep_roots = dep_roots
self._python_path = python_path
self._valgrind_path = valgrind_path
self._gdb_path = gdb_path
self._test_params = {}
self._context = {}
self._test_item_node_id = None
ram_disk_path = os.environ.get("DISTBUILD_RAM_DISK_PATH")
if ram_disk_path:
self._test_params["ram_drive_path"] = ram_disk_path
if test_params:
for p in test_params:
k, v = p.split("=", 1)
self._test_params[k] = v
self._context.update(context)
@property
def source_root(self):
return self._source_root
@property
def data_root(self):
return self._data_root
@property
def build_root(self):
return self._build_root
@property
def dep_roots(self):
return self._dep_roots
@property
def output_dir(self):
return self._output_dir
@property
def python_path(self):
return self._python_path or sys.executable
@property
def valgrind_path(self):
if not self._valgrind_path:
raise ValueError("path to valgrind was not pass correctly, use --valgrind-path to fix it")
return self._valgrind_path
@property
def gdb_path(self):
return self._gdb_path
def get_binary(self, *path):
assert self._build_root, "Build root was not set neither determined, use --build-root to set it explicitly"
path = list(path)
if os.name == "nt":
if not path[-1].endswith(".exe"):
path[-1] += ".exe"
for binary_path in [os.path.join(self.build_root, "bin", *path), os.path.join(self.build_root, *path)]:
if os.path.exists(binary_path):
yatest_logger.debug("Binary was found by %s", binary_path)
return binary_path
yatest_logger.debug("%s not found", binary_path)
error_message = "Cannot find binary '{binary}': make sure it was added in the DEPENDS section".format(binary=path)
yatest_logger.debug(error_message)
if self._mode == RunMode.Run:
raise TestMisconfigurationException(error_message)
def file(self, path, diff_tool=None, local=False, diff_file_name=None, diff_tool_timeout=None):
return canon.ExternalDataInfo.serialize_file(path, diff_tool=diff_tool, local=local, diff_file_name=diff_file_name, diff_tool_timeout=diff_tool_timeout)
def get_param(self, key, default=None):
return self._test_params.get(key, default)
def get_param_dict_copy(self):
return dict(self._test_params)
def get_context(self, key):
return self._context[key]
def _detect_source_root(self):
root = None
try:
import library.python.find_root
# try to determine source root from cwd
cwd = os.getcwd()
root = library.python.find_root.detect_root(cwd)
if not root:
# try to determine root pretending we are in the test work dir made from --keep-temps run
env_subdir = os.path.join("environment", "arcadia")
root = library.python.find_root.detect_root(cwd, detector=lambda p: os.path.exists(os.path.join(p, env_subdir)))
except ImportError:
logging.warning("Unable to import library.python.find_root")
return root
def _detect_output_root(self):
for p in [
# if run from kept test working dir
tools.TESTING_OUT_DIR_NAME,
# if run from source dir
os.path.join("test-results", os.path.basename(os.path.splitext(sys.argv[0])[0]), tools.TESTING_OUT_DIR_NAME),
]:
if os.path.exists(p):
return p
return None
def set_test_item_node_id(self, node_id):
self._test_item_node_id = node_id
def get_test_item_node_id(self):
assert self._test_item_node_id
return self._test_item_node_id
def set_metric_value(self, name, val):
node_id = self.get_test_item_node_id()
if node_id not in pytest.config.test_metrics:
pytest.config.test_metrics[node_id] = {}
pytest.config.test_metrics[node_id][name] = val
def get_metric_value(self, name, default=None):
res = pytest.config.test_metrics.get(self.get_test_item_node_id(), {}).get(name)
if res is None:
return default
return res
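# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original plugin): Ya.__init__ turns
# "key=value" strings from the test-params option into the dict served by
# get_param() and get_param_dict_copy().  The standalone helper below mirrors
# that parsing; the sample values in the comment are made up.
def parse_test_params(pairs):
    """Split "k=v" strings on the first '=' only, as Ya.__init__ does."""
    params = {}
    for pair in pairs:
        key, value = pair.split("=", 1)
        params[key] = value
    return params
# parse_test_params(["timeout=30", "filter=a=b"]) == {"timeout": "30", "filter": "a=b"}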
| 38.710699 | 178 | 0.64105 |
62f8c89bb21cde43701e188da8e2580eafd7f0d9 | 5,270 | py | Python |
mmdet/ops/dcn/modules/deform_conv.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | ["Apache-2.0"] | null | null | null |
mmdet/ops/dcn/modules/deform_conv.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | ["Apache-2.0"] | null | null | null |
mmdet/ops/dcn/modules/deform_conv.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | ["Apache-2.0"] | null | null | null |
import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from ..functions.deform_conv import deform_conv, modulated_deform_conv
class DeformConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=False):
super(DeformConv, self).__init__()
assert not bias
        assert in_channels % groups == 0, \
            'in_channels {} is not divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} is not divisible by groups {}'.format(
                out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // self.groups,
*self.kernel_size))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
def forward(self, x, offset):
return deform_conv(x, offset, self.weight, self.stride, self.padding,
self.dilation, self.groups, self.deformable_groups)
class DeformConvPack(DeformConv):
def __init__(self, *args, **kwargs):
super(DeformConvPack, self).__init__(*args, **kwargs)
self.conv_offset = nn.Conv2d(
self.in_channels,
self.deformable_groups * 2 * self.kernel_size[0] *
self.kernel_size[1],
kernel_size=self.kernel_size,
stride=_pair(self.stride),
padding=_pair(self.padding),
bias=True)
self.init_offset()
def init_offset(self):
self.conv_offset.weight.data.zero_()
self.conv_offset.bias.data.zero_()
def forward(self, x, y=None):
offset = self.conv_offset(x) if y is None else self.conv_offset(y)
return deform_conv(x, offset, self.weight, self.stride, self.padding,
self.dilation, self.groups, self.deformable_groups)
class ModulatedDeformConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=True):
super(ModulatedDeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.deformable_groups = deformable_groups
self.with_bias = bias
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.zero_()
def forward(self, x, offset, mask):
return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
self.stride, self.padding, self.dilation,
self.groups, self.deformable_groups)
class ModulatedDeformConvPack(ModulatedDeformConv):
def __init__(self, *args, **kwargs):
super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)
self.conv_offset_mask = nn.Conv2d(
self.in_channels,
self.deformable_groups * 3 * self.kernel_size[0] *
self.kernel_size[1],
kernel_size=self.kernel_size,
stride=_pair(self.stride),
padding=_pair(self.padding),
bias=True)
self.init_offset()
def init_offset(self):
self.conv_offset_mask.weight.data.zero_()
self.conv_offset_mask.bias.data.zero_()
def forward(self, x, y=None):
out = self.conv_offset_mask(x) if y is None else self.conv_offset_mask(y)
o1, o2, mask = torch.chunk(out, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
self.stride, self.padding, self.dilation,
self.groups, self.deformable_groups)
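# ---------------------------------------------------------------------------
# Rough usage sketch (not part of the original module).  It assumes the
# mmdet deform-conv CUDA extension is compiled and a GPU is available;
# channel counts and spatial sizes below are illustrative only.
#
#   import torch
#
#   x = torch.randn(2, 64, 56, 56).cuda()
#   conv = DeformConvPack(64, 128, kernel_size=3, padding=1).cuda()
#   print(conv(x).shape)            # torch.Size([2, 128, 56, 56]); offsets are predicted internally
#
#   mconv = ModulatedDeformConvPack(64, 128, kernel_size=3, padding=1).cuda()
#   print(mconv(x).shape)           # torch.Size([2, 128, 56, 56]); adds a sigmoid modulation mask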
| 33.566879 | 81 | 0.577609 |
81b9af2276473cae93fc1325c9b0f0d22dc4e288 | 3,883 | py | Python |
tests/test_schedule.py | rsketine/neon | a10f90546d2ddae68c3671f59ba9b513158a91f1 | ["Apache-2.0"] | 4,415 | 2015-05-04T06:00:19.000Z | 2022-03-22T13:38:45.000Z |
tests/test_schedule.py | EquifAI/neon | a10f90546d2ddae68c3671f59ba9b513158a91f1 | ["Apache-2.0"] | 446 | 2015-05-06T20:27:29.000Z | 2021-05-29T03:41:06.000Z |
tests/test_schedule.py | EquifAI/neon | a10f90546d2ddae68c3671f59ba9b513158a91f1 | ["Apache-2.0"] | 1,060 | 2015-05-06T19:03:33.000Z | 2022-02-13T07:43:01.000Z |
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
from neon.optimizers import (Schedule, ExpSchedule, PowerSchedule, StepSchedule,
ShiftSchedule)
from utils import allclose_with_out
def test_schedule(backend_default):
"""
Test constant rate, fixed step and various modes of programmable steps.
"""
lr_init = 0.1
# default scheduler has a constant learning rate
sch = Schedule()
for epoch in range(10):
lr = sch.get_learning_rate(learning_rate=lr_init, epoch=epoch)
assert lr == lr_init
# test a uniform step schedule
step_config = 2
change = 0.5
sch = Schedule(step_config=step_config, change=change)
for epoch in range(10):
lr = sch.get_learning_rate(learning_rate=lr_init, epoch=epoch)
# test a repeated call for the same epoch
lr2 = sch.get_learning_rate(learning_rate=lr_init, epoch=epoch)
# print epoch, lr, lr2
assert allclose_with_out(lr, lr_init * change**(np.floor(epoch // step_config)))
assert allclose_with_out(lr2, lr_init * change**(np.floor(epoch // step_config)))
# test a list step schedule
sch = Schedule(step_config=[2, 3], change=.1)
assert allclose_with_out(.1, sch.get_learning_rate(learning_rate=.1, epoch=0))
assert allclose_with_out(.1, sch.get_learning_rate(learning_rate=.1, epoch=1))
assert allclose_with_out(.01, sch.get_learning_rate(learning_rate=.1, epoch=2))
# test a repeated call for the same epoch
assert allclose_with_out(.01, sch.get_learning_rate(learning_rate=.1, epoch=2))
assert allclose_with_out(.001, sch.get_learning_rate(learning_rate=.1, epoch=3))
assert allclose_with_out(.001, sch.get_learning_rate(learning_rate=.1, epoch=4))
def test_step_schedule(backend_default):
"""
Test the StepSchedule class
"""
step_config = [1, 4, 5]
change = [0.1, 0.3, 0.4]
sch = StepSchedule(step_config=step_config, change=change)
target_lr = [1.0, 0.1, 0.1, 0.1, 0.3, 0.4, 0.4, 0.4, 0.4]
for e, lr in enumerate(target_lr):
assert allclose_with_out(lr, sch.get_learning_rate(learning_rate=1.0, epoch=e))
def test_power_schedule(backend_default):
"""
Test the PowerSchedule class
"""
sch = PowerSchedule(step_config=2, change=0.5)
target_lr = [1.0, 1.0, 0.5, 0.5, 0.25, 0.25, 0.125, 0.125]
for e, lr in enumerate(target_lr):
assert allclose_with_out(lr, sch.get_learning_rate(learning_rate=1.0, epoch=e))
def test_exp_schedule(backend_default):
"""
Test exponential learning rate schedule
"""
lr_init = 0.1
decay = 0.01
sch = ExpSchedule(decay)
for epoch in range(10):
lr = sch.get_learning_rate(learning_rate=lr_init, epoch=epoch)
assert allclose_with_out(lr, lr_init / (1. + decay * epoch))
def test_shift_schedule(backend_default):
"""
Test binary shift learning rate schedule
"""
lr_init = 0.1
interval = 1
sch = ShiftSchedule(interval)
for epoch in range(10):
lr = sch.get_learning_rate(learning_rate=lr_init, epoch=epoch)
assert allclose_with_out(lr, lr_init / (2 ** epoch))
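# ---------------------------------------------------------------------------
# Back-of-the-envelope helper (not part of the original tests): PowerSchedule
# with step_config=2 and change=0.5 halves the rate every two epochs, i.e.
# lr(e) = lr_init * change ** (e // step_config).  The helper reproduces the
# target_lr list used in test_power_schedule without importing neon.
def expected_power_schedule(lr_init, change, step_config, nepochs):
    return [lr_init * change ** (e // step_config) for e in range(nepochs)]
# expected_power_schedule(1.0, 0.5, 2, 8) == [1.0, 1.0, 0.5, 0.5, 0.25, 0.25, 0.125, 0.125]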
| 36.28972 | 89 | 0.664435 |
52aabf3313e10cb984dad43725b018a7624fcf4e | 1,007 | py | Python |
fixture/application.py | serejkansk/python_training | 4ffab2d66b3512154f47c988e4c6bebba49d5e32 | ["Apache-2.0"] | null | null | null |
fixture/application.py | serejkansk/python_training | 4ffab2d66b3512154f47c988e4c6bebba49d5e32 | ["Apache-2.0"] | null | null | null |
fixture/application.py | serejkansk/python_training | 4ffab2d66b3512154f47c988e4c6bebba49d5e32 | ["Apache-2.0"] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox(capabilities={"marionette": False})
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def open_home_page(self):
wd = self.wd
# open home page
wd.get(self.base_url)
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def destroy(self):
self.wd.quit()
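# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original fixture): a typical
# pytest wiring for Application lives in conftest.py.  The option names and
# base URL below are illustrative assumptions, not the project's actual
# configuration.
#
#   import pytest
#
#   @pytest.fixture(scope="session")
#   def app(request):
#       browser = request.config.getoption("--browser", default="firefox")
#       base_url = request.config.getoption("--baseUrl", default="http://localhost/addressbook/")
#       fixture = Application(browser=browser, base_url=base_url)
#       request.addfinalizer(fixture.destroy)
#       return fixture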
| 27.972222 | 75 | 0.598808 |
0afdd87277f82e2be41e3f8be1ec444b72e0e2da | 126 | py | Python |
home/data/contests/proto_icpc/submissions/344742902_A-expected_team/ff.py | mooshak-dcc/mooshak-2 | 12cfb9a0461fe9f2d58507ee801b82ec9a97463c | ["Artistic-2.0"] | null | null | null |
home/data/contests/proto_icpc/submissions/344742902_A-expected_team/ff.py | mooshak-dcc/mooshak-2 | 12cfb9a0461fe9f2d58507ee801b82ec9a97463c | ["Artistic-2.0"] | null | null | null |
home/data/contests/proto_icpc/submissions/344742902_A-expected_team/ff.py | mooshak-dcc/mooshak-2 | 12cfb9a0461fe9f2d58507ee801b82ec9a97463c | ["Artistic-2.0"] | null | null | null |
b = int(input().strip())
m = int(input().strip())
def derivative(b, m):
    # d/dm of m**b evaluated at m, i.e. b * m**(b - 1)
    return b * m ** (b - 1)
print(derivative(b, m))
| 15.75 | 27 | 0.555556 |
5754336730f76cde107b2b813b47e35254a45805 | 9,480 | py | Python |
tests/test_templating.py | skivis/BlackSheep | 486f04ba2045f31dd3e188f52c45a275eb150967 | ["MIT"] | null | null | null |
tests/test_templating.py | skivis/BlackSheep | 486f04ba2045f31dd3e188f52c45a275eb150967 | ["MIT"] | null | null | null |
tests/test_templating.py | skivis/BlackSheep | 486f04ba2045f31dd3e188f52c45a275eb150967 | ["MIT"] | null | null | null |
from dataclasses import dataclass
from typing import List
import pytest
from blacksheep.server import Application
from blacksheep.server.controllers import Controller, RoutesRegistry
from blacksheep.server.templating import (
template_name,
use_templates,
view,
view_async,
model_to_view_params,
)
from jinja2 import PackageLoader
from pydantic import BaseModel
from .test_application import FakeApplication, MockReceive, MockSend, get_example_scope
def get_app(enable_async):
app = FakeApplication()
render = use_templates(
app,
loader=PackageLoader("tests.testapp", "templates"),
enable_async=enable_async,
)
return app, render
@pytest.fixture()
def home_model():
return {
"title": "Example",
"heading": "Hello World!",
"paragraph": "Lorem ipsum dolor sit amet",
}
@dataclass
class Sentence:
text: str
url: str
@dataclass
class HelloModel:
name: str
sentences: List[Sentence]
class Sentence2:
def __init__(self, text: str, url: str) -> None:
self.text = text
self.url = url
class HelloModel2:
def __init__(self, name: str, sentences: List[Sentence2]) -> None:
self.name = name
self.sentences = sentences
class PydanticSentence(BaseModel):
text: str
url: str
class PydanticHelloModel(BaseModel):
name: str
sentences: List[PydanticSentence]
def dataclass_model():
return HelloModel(
"World!",
[
Sentence(
"Check this out!",
"https://github.com/RobertoPrevato/BlackSheep",
)
],
)
def class_model():
return HelloModel2(
"World!",
[
Sentence2(
"Check this out!",
"https://github.com/RobertoPrevato/BlackSheep",
)
],
)
def pydantic_model():
return PydanticHelloModel(
name="World!",
sentences=[
PydanticSentence(
text="Check this out!",
url="https://github.com/RobertoPrevato/BlackSheep",
)
],
)
@pytest.fixture()
def specific_text():
return """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Specific Example</title>
</head>
<body>
<h1>Hello World!</h1>
<p>Lorem ipsum dolor sit amet</p>
</body>
</html>"""
nomodel_text = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Example</title>
</head>
<body>
<h1>Hello World!</h1>
<p>Lorem ipsum dolor sit amet</p>
</body>
</html>"""
async def _home_scenario(app: FakeApplication, url="/", expected_text=None):
app.build_services()
app.normalize_handlers()
await app(get_example_scope("GET", url), MockReceive(), MockSend())
text = await app.response.text()
if expected_text is None:
expected_text = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Example</title>
</head>
<body>
<h1>Hello World!</h1>
<p>Lorem ipsum dolor sit amet</p>
</body>
</html>"""
assert text == expected_text
assert app.response.status == 200
async def _view_scenario(app: FakeApplication, expected_text, url="/"):
app.build_services()
app.normalize_handlers()
await app(get_example_scope("GET", url), MockReceive(), MockSend())
text = await app.response.text()
assert text == expected_text
assert app.response.status == 200
@pytest.mark.asyncio
async def test_jinja_async_mode(home_model):
app, render = get_app(True)
@app.router.get("/")
async def home():
return await render("home", home_model)
await _home_scenario(app)
@pytest.mark.asyncio
async def test_jinja_sync_mode(home_model):
app, render = get_app(False)
@app.router.get("/")
async def home():
return render("home", home_model)
await _home_scenario(app)
@pytest.mark.asyncio
async def test_jinja_async_mode_with_verbose_method(home_model):
app, _ = get_app(True)
@app.router.get("/")
async def home(jinja):
return await view_async(jinja, "home", home_model)
await _home_scenario(app)
@pytest.mark.asyncio
async def test_jinja_sync_mode_with_verbose_method(home_model):
app, _ = get_app(False)
@app.router.get("/")
async def home(jinja):
return view(jinja, "home", home_model)
await _home_scenario(app)
@pytest.mark.asyncio
async def test_controller_conventional_view_name(home_model):
app, _ = get_app(False)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get()
def index(self):
return self.view(model=home_model)
app.setup_controllers()
await _home_scenario(app)
@pytest.mark.asyncio
async def test_controller_conventional_view_name_async(home_model):
app, _ = get_app(True)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get()
async def index(self):
return await self.view_async(model=home_model)
app.setup_controllers()
await _home_scenario(app)
@pytest.mark.asyncio
async def test_controller_specific_view_name(home_model, specific_text):
app, _ = get_app(False)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get()
def index(self):
return self.view("specific", home_model)
app.setup_controllers()
await _home_scenario(app, expected_text=specific_text)
@pytest.mark.asyncio
async def test_controller_specific_view_name_async(home_model, specific_text):
app, _ = get_app(True)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get()
async def index(self):
return await self.view_async("specific", model=home_model)
app.setup_controllers()
await _home_scenario(app, expected_text=specific_text)
@pytest.mark.asyncio
async def test_controller_specific_view_name_async_no_model():
app, _ = get_app(True)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get()
async def index(self):
return await self.view_async("nomodel")
app.setup_controllers()
await _home_scenario(app, expected_text=nomodel_text)
@pytest.mark.asyncio
async def test_controller_conventional_view_name_no_model(home_model):
app, _ = get_app(False)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get(...)
def nomodel(self):
return self.view()
app.setup_controllers()
await _home_scenario(app, "/nomodel", expected_text=nomodel_text)
@pytest.mark.asyncio
async def test_controller_conventional_view_name_sub_function(home_model):
app, _ = get_app(False)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
def ufo(self, model):
return self.foo(model)
def foo(self, model):
return self.view(model=model)
@get()
def index(self):
return self.ufo(home_model)
app.setup_controllers()
await _home_scenario(app)
@pytest.mark.asyncio
async def test_controller_conventional_view_name_extraneous_function(home_model):
app, _ = get_app(False)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
def extraneous(controller, model):
return controller.view(model=model)
class Lorem(Controller):
def ufo(self, model):
return self.foo(model)
def foo(self, model):
return extraneous(self, model)
@get()
def index(self):
return self.ufo(home_model)
app.setup_controllers()
await _home_scenario(app)
@pytest.mark.parametrize(
"value,expected_name",
[
("index", "index.html"),
("index.html", "index.html"),
("default", "default.html"),
],
)
def test_template_name(value, expected_name):
assert template_name(value) == expected_name
def test_use_templates_throws_for_invalid_services():
app = Application(services={}) # type: ignore
with pytest.raises(TypeError):
use_templates(
app, loader=PackageLoader("tests.testapp", "templates"), enable_async=False
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"model_fixture",
[
class_model,
dataclass_model,
pydantic_model,
],
)
async def test_controller_model_interop(model_fixture):
app, _ = get_app(False)
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Lorem(Controller):
@get()
def index(self):
return self.view("hello", model_fixture())
app.setup_controllers()
await _view_scenario(
app,
expected_text='<div style="margin: 10em 2em;">\n <h1>Hello, World!!</h1>\n\n'
+ ' <ul>\n \n <li><a href="https://github.com/RobertoPrevato/'
+ 'BlackSheep">Check this out!</a></li>\n \n </ul>\n</div>',
)
def test_model_to_view_params_passes_unhandled_argument():
assert model_to_view_params(2) == 2
assert model_to_view_params("Something") == "Something"
| 23.349754 | 87 | 0.654958 |
6c7a7ec1925431138fbbbd5566e76ad5b6ac4e51 | 1,499 | py | Python |
src/symbols/metrics.py | stupiding/insightface | 85a3b65c07b39e7ad02aabddd6cb6529baf4e605 | ["MIT"] | null | null | null |
src/symbols/metrics.py | stupiding/insightface | 85a3b65c07b39e7ad02aabddd6cb6529baf4e605 | ["MIT"] | null | null | null |
src/symbols/metrics.py | stupiding/insightface | 85a3b65c07b39e7ad02aabddd6cb6529baf4e605 | ["MIT"] | null | null | null |
import mxnet as mx
class AccMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(AccMetric, self).__init__(
'acc', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
self.count = 0
def update(self, labels_ls, preds_ls):
self.count+=1
labels = [labels_ls[0][:, i] for i in range(len(preds_ls) - 1)] if len(preds_ls) > 2 else labels_ls
for label, pred_label in zip(labels, preds_ls[1:]):
if pred_label.shape != label.shape:
pred_label = mx.ndarray.argmax(pred_label, axis=self.axis)
pred_label = pred_label.asnumpy().astype('int32').flatten()
label = label.asnumpy()
if label.ndim==2:
label = label[:,0]
label = label.astype('int32').flatten()
assert label.shape==pred_label.shape
pred_label, label = pred_label.flat, label.flat
#valid_ids = np.argwhere(label.asnumpy() != -1)
self.sum_metric += (pred_label == label).sum()
self.num_inst += len(pred_label)
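# ---------------------------------------------------------------------------
# Illustrative smoke test (not part of the original module).  It assumes
# mxnet is installed; the arrays mimic one (softmax output, label) batch in
# which every prediction is correct, so AccMetric reports 1.0.  Call it
# manually; it is not wired into any training loop.
def _acc_metric_smoke_test():
    metric = AccMetric()
    labels = [mx.nd.array([0, 1, 1])]
    preds = [mx.nd.zeros((3, 2)),                                  # embedding output, ignored here
             mx.nd.array([[0.9, 0.1], [0.2, 0.8], [0.3, 0.7]])]    # softmax scores
    metric.update(labels, preds)
    return metric.get()   # ('acc', 1.0)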
class LossValueMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(LossValueMetric, self).__init__(
'lossvalue', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
def update(self, labels, preds):
print(labels[0].shape, preds[0].shape)
loss = preds[-1].asnumpy()[0]
self.sum_metric += loss
self.num_inst += 1.0
gt_label = preds[-2].asnumpy()
#print(gt_label)
| 34.068182 | 103 | 0.63042 |
07a0423a6ac3ced26660e2c8083f51cb22ffc9ea | 2,301 | py | Python |
paddleseg/utils/__init__.py | shiyutang/UperNet | 836762b7c0c46023f091d991b5cde7df2dda8bd6 | ["Apache-2.0"] | null | null | null |
paddleseg/utils/__init__.py | shiyutang/UperNet | 836762b7c0c46023f091d991b5cde7df2dda8bd6 | ["Apache-2.0"] | null | null | null |
paddleseg/utils/__init__.py | shiyutang/UperNet | 836762b7c0c46023f091d991b5cde7df2dda8bd6 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from . import logger
from . import download
from . import metrics
from .env import seg_env, get_sys_env
from .utils import *
from .timer import TimeAverager, calculate_eta
from . import visualize
from .config_check import config_check
def get_image_list(image_path):
"""Get image list"""
valid_suffix = [
'.JPEG', '.jpeg', '.JPG', '.jpg', '.BMP', '.bmp', '.PNG', '.png'
]
image_list = []
image_dir = None
if os.path.isfile(image_path):
if os.path.splitext(image_path)[-1] in valid_suffix:
image_list.append(image_path)
else:
image_dir = os.path.dirname(image_path)
with open(image_path, 'r') as f:
for line in f:
line = line.strip()
if len(line.split()) > 1:
line = line.split()[0]
image_list.append(os.path.join(image_dir, line))
elif os.path.isdir(image_path):
image_dir = image_path
for root, dirs, files in os.walk(image_path):
for f in files:
if '.ipynb_checkpoints' in root:
continue
if os.path.splitext(f)[-1] in valid_suffix:
image_list.append(os.path.join(root, f))
else:
        raise FileNotFoundError(
            '`--image_path` was not found. It should be a path to an image, a file list containing image paths, or a directory containing images.'
        )
if len(image_list) == 0:
        raise RuntimeError(
            'There are no image files in `--image_path`={}'.format(image_path))
return image_list, image_dir
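# Illustrative calls (not part of the original module; paths are hypothetical):
#   image_list, image_dir = get_image_list("demo/images")          # a directory of images
#   image_list, image_dir = get_image_list("demo/val_list.txt")    # a text file listing image paths
#   image_list, image_dir = get_image_list("demo/city.png")        # a single image file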
| 39 | 145 | 0.614081 |
1c900fc0838c54d44ce13436acd2cd47eea0b271 | 421 | py | Python |
backend/server/apps/database/views.py | luke7ucas/django-js-boilerplate | 26da62a3fce99d2a1fec3360f18ad7408334d84c | ["MIT"] | null | null | null |
backend/server/apps/database/views.py | luke7ucas/django-js-boilerplate | 26da62a3fce99d2a1fec3360f18ad7408334d84c | ["MIT"] | null | null | null |
backend/server/apps/database/views.py | luke7ucas/django-js-boilerplate | 26da62a3fce99d2a1fec3360f18ad7408334d84c | ["MIT"] | null | null | null |
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
# Create your views here.
def index(request):
# If no user is signed in, return to login page:
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
return render(request, "database/managedb.html")
| 38.272727 | 59 | 0.774347 |
dceccf9fbca94942834c6e76bcdd868a43fef399 | 4,453 | py | Python |
BilinearUpSampling.py | x65han/Behavioral-Cloning | 2e399e3c59295beac165918f852e53aa7f4ce39b | ["MIT"] | 1 | 2018-04-01T16:27:48.000Z | 2018-04-01T16:27:48.000Z |
BilinearUpSampling.py | x65han/Behavioral-Cloning | 2e399e3c59295beac165918f852e53aa7f4ce39b | ["MIT"] | null | null | null |
BilinearUpSampling.py | x65han/Behavioral-Cloning | 2e399e3c59295beac165918f852e53aa7f4ce39b | ["MIT"] | null | null | null |
import numpy as np  # np.array is used below when building resize target shapes
import keras.backend as K
import tensorflow as tf
from keras.layers import *
def resize_images_bilinear(X, height_factor=1, width_factor=1, target_height=None, target_width=None, data_format='default'):
'''Resizes the images contained in a 4D tensor of shape
- [batch, channels, height, width] (for 'channels_first' data_format)
- [batch, height, width, channels] (for 'channels_last' data_format)
by a factor of (height_factor, width_factor). Both factors should be
positive integers.
'''
if data_format == 'default':
data_format = K.image_data_format()
if data_format == 'channels_first':
original_shape = K.int_shape(X)
if target_height and target_width:
new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))
else:
new_shape = tf.shape(X)[2:]
new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        X = K.permute_dimensions(X, [0, 2, 3, 1])
        X = tf.image.resize_bilinear(X, new_shape)
        X = K.permute_dimensions(X, [0, 3, 1, 2])
if target_height and target_width:
X.set_shape((None, None, target_height, target_width))
else:
X.set_shape((None, None, original_shape[2] * height_factor, original_shape[3] * width_factor))
return X
elif data_format == 'channels_last':
original_shape = K.int_shape(X)
if target_height and target_width:
new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))
else:
new_shape = tf.shape(X)[1:3]
new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
X = tf.image.resize_bilinear(X, new_shape)
if target_height and target_width:
X.set_shape((None, target_height, target_width, None))
else:
X.set_shape((None, original_shape[1] * height_factor, original_shape[2] * width_factor, None))
return X
else:
raise Exception('Invalid data_format: ' + data_format)
class BilinearUpSampling2D(Layer):
def __init__(self, size=(1, 1), target_size=None, data_format='default', **kwargs):
if data_format == 'default':
data_format = K.image_data_format()
self.size = tuple(size)
if target_size is not None:
self.target_size = tuple(target_size)
else:
self.target_size = None
assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {tf, th}'
self.data_format = data_format
self.input_spec = [InputSpec(ndim=4)]
super(BilinearUpSampling2D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
width = int(self.size[0] * input_shape[2] if input_shape[2] is not None else None)
height = int(self.size[1] * input_shape[3] if input_shape[3] is not None else None)
if self.target_size is not None:
width = self.target_size[0]
height = self.target_size[1]
return (input_shape[0],
input_shape[1],
width,
height)
elif self.data_format == 'channels_last':
width = int(self.size[0] * input_shape[1] if input_shape[1] is not None else None)
height = int(self.size[1] * input_shape[2] if input_shape[2] is not None else None)
if self.target_size is not None:
width = self.target_size[0]
height = self.target_size[1]
return (input_shape[0],
width,
height,
input_shape[3])
else:
raise Exception('Invalid data_format: ' + self.data_format)
def call(self, x, mask=None):
if self.target_size is not None:
return resize_images_bilinear(x, target_height=self.target_size[0], target_width=self.target_size[1], data_format=self.data_format)
else:
return resize_images_bilinear(x, height_factor=self.size[0], width_factor=self.size[1], data_format=self.data_format)
def get_config(self):
config = {'size': self.size, 'target_size': self.target_size}
base_config = super(BilinearUpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
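# ---------------------------------------------------------------------------
# Rough usage sketch (not part of the original module).  It assumes an old
# Keras/TensorFlow 1.x setup in which tf.image.resize_bilinear still exists;
# the input and target sizes are illustrative.  The helper is defined but
# never called here.
def _bilinear_upsampling_demo():
    from keras.models import Sequential

    model = Sequential()
    model.add(BilinearUpSampling2D(target_size=(160, 320), input_shape=(80, 160, 3)))
    return model.output_shape   # expected: (None, 160, 320, 3)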
| 47.37234 | 143 | 0.627218 |
2950149e7108530926e9b9e78130961c49dd0796 | 1,556 | py | Python |
pixUtils/bashIt.py | vishnu-chand/videoMosaic | 58513f372f794597611c9e805c2aebc58e2fda0e | ["MIT"] | null | null | null |
pixUtils/bashIt.py | vishnu-chand/videoMosaic | 58513f372f794597611c9e805c2aebc58e2fda0e | ["MIT"] | null | null | null |
pixUtils/bashIt.py | vishnu-chand/videoMosaic | 58513f372f794597611c9e805c2aebc58e2fda0e | ["MIT"] | null | null | null |
import json
import subprocess
def decodeCmd(cmd, sepBy):
cmd = [cmd.strip() for cmd in cmd.split('\n')]
cmd = [cmd for cmd in cmd if cmd and not cmd.startswith('#')]
cmd = sepBy.join(cmd)
return cmd
class Error(Exception):
def __init__(self, cmd, stdout, stderr):
super(Error, self).__init__(f'{cmd} error (see stderr output for detail)')
self.stdout = stdout
self.stderr = stderr
def exeIt(cmd, returnStdout=True, returnStderr=True, input=None, sepBy=' ', debug=False):
pipe_stdin = None # implement streaming input
stdin_stream = subprocess.PIPE if pipe_stdin else None
stdout_stream = subprocess.PIPE if returnStdout else None
stderr_stream = subprocess.PIPE if returnStderr else None
cmd = decodeCmd(cmd, sepBy)
if debug:
print(f"\n{'_' * 100}\nbash cmd: {cmd}\n{'_' * 100}\n")
process = subprocess.Popen(cmd, shell=True, stdin=stdin_stream, stdout=stdout_stream, stderr=stderr_stream)
out, err = process.communicate(input)
retcode = process.poll()
# if retcode:
# raise Error('ffmpeg', out, err)
if out is not None:
out = out.decode()
if err is not None:
err = err.decode()
return retcode, out, err
def curlIt(data, host='', port='', call='', url='', method='POST', timeout=60, debug=False):
if not url:
url = f'{host}:{port}'
if call:
url = f'{url}/{call}'
curlCmd = f"curl -X {method} '{url}' -d '{json.dumps(data)}' -m {timeout}"
return exeIt(cmd=curlCmd, sepBy='', debug=debug)
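# ---------------------------------------------------------------------------
# Illustrative calls (not part of the original module; a Unix shell is
# assumed and the curl endpoint is hypothetical):
#   retcode, out, err = exeIt("echo hello", debug=True)     # -> 0, "hello\n", ""
#   retcode, out, err = curlIt({"ping": 1}, host="http://localhost", port=8080, call="health")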
| 33.826087 | 111 | 0.636247 |
9ca654fdd16543e9b47b7306b39c7fc995e892fc | 1,628 | py | Python |
peeringdb_server/management/commands/pdb_deskpro_publish.py | jejenone/peeringdb | 1d6b616efaaf9e33b6b200954533248ed1f16c4d | ["BSD-2-Clause"] | null | null | null |
peeringdb_server/management/commands/pdb_deskpro_publish.py | jejenone/peeringdb | 1d6b616efaaf9e33b6b200954533248ed1f16c4d | ["BSD-2-Clause"] | null | null | null |
peeringdb_server/management/commands/pdb_deskpro_publish.py | jejenone/peeringdb | 1d6b616efaaf9e33b6b200954533248ed1f16c4d | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
from peeringdb_server import models
from peeringdb_server.deskpro import APIClient, APIError
class Command(BaseCommand):
help = "Process deskpro ticket queue"
def add_arguments(self, parser):
pass
def log(self, msg):
print(msg)
def handle(self, *args, **options):
client = APIClient(settings.DESKPRO_URL, settings.DESKPRO_KEY)
self.log(u"DESKPRO: {}".format(settings.DESKPRO_URL))
ticket_qs = models.DeskProTicket.objects.filter(
published__isnull=True).order_by("created")
if not ticket_qs.count():
self.log("No tickets in queue")
return
for ticket in ticket_qs[:10]:
self.log(u"Posting to Deskpro: #{}".format(ticket.id))
try:
client.create_ticket(ticket)
ticket.published = datetime.datetime.now().replace(
tzinfo=models.UTC())
ticket.save()
except APIError as exc:
self.log(
u"!!!! Could not create ticket #{} - error data has been attached to ticket body.".
format(ticket.id))
ticket.published = datetime.datetime.now().replace(
tzinfo=models.UTC())
ticket.subject = u"[FAILED] {}".format(ticket.subject)
ticket.body = u"{}\nAPI Delivery Error: {}".format(
ticket.body, exc.data)
ticket.save()
| 33.22449 | 103 | 0.581081 |
2d76eb3a9d1a59b5e660757f550b4b599e2837b7 | 1,271 | py | Python |
var/spack/repos/builtin/packages/r-shinyjs/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z |
var/spack/repos/builtin/packages/r-shinyjs/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z |
var/spack/repos/builtin/packages/r-shinyjs/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RShinyjs(RPackage):
"""Easily Improve the User Experience of Your Shiny Apps in Seconds
Perform common useful JavaScript operations in Shiny apps that will greatly
improve your apps without having to know any JavaScript. Examples include:
hiding an element, disabling an input, resetting an input back to its
original value, delaying code execution by a few seconds, and many more
useful functions for both the end user and the developer. 'shinyjs' can
also be used to easily call your own custom JavaScript functions from R."""
homepage = "https://deanattali.com/shinyjs/"
cran = "shinyjs"
version('2.0.0', sha256='c2cdd9fab41f6b46bb41b288cd9b3fb3a7fe9627b664e3a58a0cb5dd4c19f8ff')
depends_on('r@3.1.0:', type=('build', 'run'))
depends_on('r-digest@0.6.8:', type=('build', 'run'))
depends_on('r-htmltools@0.2.9:', type=('build', 'run'))
depends_on('r-jsonlite', type=('build', 'run'))
depends_on('r-shiny@1.0.0:', type=('build', 'run'))
depends_on('pandoc', type='build')
| 42.366667 | 95 | 0.712038 |
5afecdf7a2a9b216e64ab067c6dca514a457c27d | 141,036 | py | Python |
astropy/table/table.py | cosmicoder/astropy | c4e9a703af2b0ffb2c65c3b195c9af3b9d902819 | ["BSD-3-Clause"] | null | null | null |
astropy/table/table.py | cosmicoder/astropy | c4e9a703af2b0ffb2c65c3b195c9af3b9d902819 | ["BSD-3-Clause"] | null | null | null |
astropy/table/table.py | cosmicoder/astropy | c4e9a703af2b0ffb2c65c3b195c9af3b9d902819 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import TableIndices, TableLoc, TableILoc, TableLocIndices
import sys
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import types
import itertools
import numpy as np
from numpy import ma
from astropy import log
from astropy.units import Quantity, QuantityInfo
from astropy.utils import isiterable, ShapedLikeNDArray
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData, MetaAttribute
from astropy.utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from astropy.utils.decorators import format_doc
from astropy.io.registry import UnifiedReadWriteMethod
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from .connect import TableRead, TableWrite
from . import conf
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
__doctest_requires__ = {'*pandas': ['pandas']}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or `None`
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, dict):
return None
names.update(row)
return list(names)
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError("Cannot replace column '{}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
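# Minimal access sketch for TableColumns (mirrors the __getitem__ docstring;
# Column is already imported at the top of this module):
#
#   tc = TableColumns([Column(name='a'), Column(name='b'), Column(name='c')])
#   tc['a']          # the Column named 'a'
#   tc[1]            # the second column ('b')
#   tc['a', 'c']     # a new TableColumns holding 'a' and 'c'
#   tc[0:2]          # a new TableColumns holding 'a' and 'b'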
class TableReadWrite:
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
info = self
info._parent_cls = owner_cls
else:
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
info._parent = instance
return info
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: http://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
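    Examples
    --------
    A minimal construction sketch (column names and values are illustrative)::
        >>> t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
        >>> t2 = Table(rows=[(1, 'x'), (2, 'y')], names=('a', 'b'))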
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
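        Examples
        --------
        A minimal sketch (column names are illustrative)::
            >>> t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
            >>> arr = t.as_array()               # structured ndarray with fields 'a' and 'b'
            >>> arr_a = t.as_array(names=['a'])  # keep only the 'a' field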
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
            # For masked output, masked mixin columns need to set the output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'):
data[col.info.name].mask = col.mask
return data
def __init__(self, data=None, masked=False, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
units=None, descriptions=None,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
        # Must copy if the dtypes are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the MetaAttribute
# descriptor. Any such attributes get removed from kwargs here.
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
setattr(self, attr, kwargs.pop(attr))
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray)
and data.shape == (0,)
and not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (names_from_list_of_dict
or _get_names_from_list_of_dict(data))
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
return
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute('unit', units)
self._set_column_attribute('description', descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, dict):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(f'sequence of {attr} values must match number of columns')
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(f'invalid column name {name} for setting {attr} attribute')
# Special case: ignore unit if it is an empty or blank string
if attr == 'unit' and isinstance(value, str):
if value.strip() == '':
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))
for col in self.itercols()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
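        Examples
        --------
        A short sketch (assumes a masked table with illustrative values)::
            >>> t = Table([[1, 2], [3, 4]], names=('a', 'b'), masked=True)
            >>> t['a'].mask = [False, True]
            >>> t2 = t.filled(-99)    # the masked entry in 'a' becomes -99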
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [col.filled(fill_value) if hasattr(col, 'filled') else col
for col in self.itercols()]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
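        Examples
        --------
        A sketch (assumes an index was added on column 'a')::
            >>> t = Table([[1, 2, 3], ['x', 'y', 'z']], names=('a', 'b'))
            >>> t.add_index('a')
            >>> sub = t.loc[1:2]    # rows whose indexed 'a' value lies in [1, 2]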
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, FastRBT, and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
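        Examples
        --------
        A short sketch (column names and values are illustrative)::
            >>> t = Table([[3, 1, 2], ['c', 'a', 'b']], names=('a', 'b'))
            >>> t.add_index('a')      # 'a' becomes the primary table index
            >>> row = t.loc[2]        # retrieve the row where 'a' == 2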
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{}", of '
'type "{}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
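        Examples
        --------
        A sketch of the 'freeze' mode (column name and values are illustrative)::
            >>> t = Table([[1, 2, 3]], names=('a',))
            >>> t.add_index('a')
            >>> with t.index_mode('freeze'):
            ...     t['a'][0] = 0     # the index refreshes only when the block exits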
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
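        Examples
        --------
        A minimal sketch::
            >>> t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
            >>> arr = np.array(t)    # structured array copy of the table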
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError(f'{inp_str} must be a list or None')
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
.format(inp_str))
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
        # ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
# Put names into a preferred order, either using the first row of data
# if it is ordered, or alphabetically. Starting with Python 3.7, dict
# is ordered so this test can be relaxed. (In practice CPython 3.6 is
# this way, but not according to the formal spec).
if (isinstance(data[0], OrderedDict)
and set(data[0].keys()) == names_from_data):
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None):
"""
Convert any allowed sequence data ``col`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
            name or data.info.name or default_name
        If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (not isinstance(data, Column) and not data_is_mixin
and isinstance(data, np.ndarray) and len(data.dtype) > 1):
data = data.view(NdarrayMixin)
data_is_mixin = True
# Get the final column name using precedence. Some objects may not
# have an info attribute.
if not name:
if hasattr(data, 'info'):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
col = col_copy(data, copy_indices=self._init_indices) if copy else data
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
col.info.indices = []
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, np.ma.MaskedArray):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif not hasattr(data, 'dtype'):
# If value doesn't have a dtype then convert to a masked numpy array.
# Then check if there were any masked elements. This logic is handling
# normal lists like [1, 2] but also odd-ball cases like a list of masked
# arrays (see #8977). Use np.ma.array() to do the heavy lifting.
try:
np_data = np.ma.array(data, dtype=dtype)
except Exception:
# Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity
np_data = np.ma.array(data, dtype=object)
if np_data.ndim > 0 and len(np_data) == 0:
# Implies input was an empty list (e.g. initializing an empty table
# with pre-declared names and dtypes but no data). Here we need to
# fall through to initializing with the original data=[].
col_cls = self.ColumnClass
else:
if np_data.mask is np.ma.nomask:
data = np_data.data
col_cls = self.ColumnClass
else:
data = np_data
col_cls = masked_col_cls
copy = False
else:
# `data` is none of the above, so just go for it and try init'ing Column
col_cls = self.ColumnClass
try:
col = col_cls(name=name, data=data, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError('unable to convert data to Column for Table')
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) > 1:
raise ValueError('Inconsistent data column lengths: {}'
.format(lengths))
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError('Cannot have None for column name')
if len(set(names)) != len(names):
raise ValueError('Duplicate column names')
table.columns = table.TableColumns((name, col) for name, col in zip(names, cols))
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = '<i>{}</i>\n'.format(xml_escape(descr))
else:
descr = f'<{descr}>\n'
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
for col in self.itercols():
if hasattr(col, 'mask') and np.any(col.mask):
return True
else:
return False
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
        # Is it a mixin but not a Quantity (which gets converted to Column with
# unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
        The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
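        Examples
        --------
        A short sketch (printed output not reproduced here)::
            >>> t = Table([[1, 2], [3, 4]], names=('a', 'b'))
            >>> t.pprint(max_lines=-1)    # doctest: +SKIP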
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(max_lines, max_width, show_name,
show_unit, show_dtype, align)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
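        Examples
        --------
        A sketch (requires a running IPython notebook; not executed as a doctest)::
            >>> t.show_in_notebook(display_length=10)  # doctest: +SKIP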
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{}-{}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.info.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
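        Examples
        --------
        A sketch (opens a web browser; not executed as a doctest)::
            >>> t.show_in_browser(jsviewer=True)  # doctest: +SKIP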
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin('file:', pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
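        Examples
        --------
        A minimal sketch::
            >>> t = Table([[1, 2], [3, 4]], names=('a', 'b'))
            >>> lines = t.pformat(max_lines=-1)   # list of formatted lines, no line limit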
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {} rows'.format(len(self)))
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(max_lines, max_width, show_name,
show_unit, show_dtype, html, tableid,
align, tableclass)
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif ((isinstance(item, np.ndarray) and item.size == 0)
or (isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or (isinstance(item, tuple) # output from np.where
and all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray))
and all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray))
and np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names
and all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
        # and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True,
default_name=None):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
        between the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
            Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or `None`
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = 'col{}'.format(len(self.columns))
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(col, name=name, copy=copy,
default_name=default_name)
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
if (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, 'shape', ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape,
subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape,
subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError('Inconsistent data column lengths')
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + '_' + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
        Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of objects
List of data objects for the new columns
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
default_names = ['col{}'.format(ii + len(self.columns))
for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes)):
self.add_column(cols[ii], index=indexes[ii], name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate, copy=copy)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn(f"replaced column '{name}'",
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
copy : bool
Make copy of the input ``col``, default=True
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f'column name {name} is not in the table')
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError('length of new column must match table length')
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (default to all columns if no names provided)
Returns
-------
rows : iterator returning tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f'{name} is not a valid column name')
cols = (self[name] for name in names)
out = zip(*cols)
return out
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError(f"Column {name} does not exist")
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, 'utf-8'))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
"""
self._convert_string_dtype('S', 'U', np.char.decode)
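    # Illustrative sketch (added comment, not in the original file): after the
    # conversion the dtype kind of a bytestring column changes from 'S' to 'U'.
    #
    #     >>> t = Table([[b'a', b'b']], names=['x'])   # dtype kind 'S'
    #     >>> t.convert_bytestring_to_unicode()
    #     >>> t['x'].dtype.kind                        # now 'U'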
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype('U', 'S', np.char.encode)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
        Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError(f"Column {name} does not exist")
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
'''
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
'''
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError("input 'new_names' must be a tuple or a list of column names")
if len(names) != len(new_names):
raise ValueError("input 'names' and 'new_names' list arguments must be the same length")
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {} is out of bounds for table with length {}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn):
col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {} after inserting {}'
' (expected {}, got {})'
.format(name, val, len(newcol), N + 1))
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, 'mask'):
newcol[index] = np.ma.masked
else:
raise TypeError("mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name))
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{}':\n{}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
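    # Minimal usage sketch for ``insert_row`` (added for illustration; not part
    # of the original source). Inserting before index 1 shifts existing rows down:
    #
    #     >>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
    #     >>> t.insert_row(1, [9, 9])
    #     >>> list(t['a'])   # [1, 9, 2]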
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is much slower than ndarray, e.g. a
# factor of ~6 for a 10 million long random array.
if len(keys) > 1:
kwargs['order'] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]].view(np.ndarray)
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs['kind'] = kind
idx = data.argsort(**kwargs)
return idx[::-1] if reverse else idx
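    # Minimal usage sketch for ``argsort`` (added comment, not in the original
    # file): the returned index array can be used to build a sorted view.
    #
    #     >>> t = Table([[3, 1, 2], ['c', 'a', 'b']], names=('a', 'b'))
    #     >>> idx = t.argsort('a')   # array([1, 2, 0])
    #     >>> list(t[idx]['a'])      # [1, 2, 3]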
def sort(self, keys=None, reverse=False):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
if reverse:
indexes = indexes[::-1]
with self.index_mode('freeze'):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
'''
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
        decimals : int or dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
'''
if isinstance(decimals, dict):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is actual implementation for __eq__.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with other::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : Table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with other::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError('cannot compare tables with different column names')
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
eq = self[name] == other[name]
if (warns and issubclass(warns[-1].category, FutureWarning)
and 'elementwise comparison failed' in str(warns[-1].message)):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f'unable to compare column {name}') from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (isinstance(eq, np.ndarray)
and eq.dtype is np.dtype('bool')
and len(eq) == len(self)):
raise TypeError(f'comparison for column {name} returned {eq} '
f'instead of the expected boolean ndarray')
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
"""
return groups.table_group_by(self, keys)
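    # Hedged usage sketch for ``group_by`` (added for illustration only): grouping
    # by a key column sorts a copy of the table and records the group boundaries.
    #
    #     >>> t = Table([['b', 'a', 'a'], [1, 2, 3]], names=('key', 'val'))
    #     >>> g = t.group_by('key')
    #     >>> len(g.groups)              # 2 groups: 'a' and 'b'
    #     >>> list(g.groups[0]['val'])   # [2, 3] (the 'a' group comes first)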
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
        objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 00:00:03
2002-01-01 2.0 6.0 8.0 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError('index must be None, False, True or a table '
'column name')
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from . import serialize
from astropy.time import Time, TimeDelta
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, Time)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype('timedelta64[ns]')
nat = np.timedelta64('NaT')
else:
new_col = col.datetime64.copy()
nat = np.datetime64('NaT')
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)')
out = OrderedDict()
for name, column in tbl.columns.items():
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
pd_dtype = str(column.dtype)
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace('i', 'I').replace('u', 'U')
out[name] = Series(column, dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to {out[name].dtype}",
TableReplaceWarning, stacklevel=3)
elif column.dtype.kind in ['f', 'c']:
out[name] = column
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if (hasattr(out[name].dtype, 'byteorder')
and out[name].dtype.byteorder not in ('=', '|')):
out[name] = out[name].byteswap().newbyteorder()
kwargs = {'index': out.pop(index)} if index else {}
return DataFrame(out, **kwargs)
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
        units : dict
            A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 00:00:01 3.0
1 2002-01-01 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
object object float64
----------------------- ------ -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or 'index'
while index_name in names:
index_name = '_' + index_name + '_'
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
                warnings.warn('`units` contains additional columns: {}'.format(
not_found
))
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ['u', 'i'] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit, copy=False)
continue
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == 'M':
from astropy.time import Time
out[name] = Time(data, format='datetime64')
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = 'isot'
# Numpy timedelta64
elif data.dtype.kind == 'm':
from astropy.time import TimeDelta
data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format='sec')
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- http://docs.astropy.org/en/stable/table/
- http://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, 'unit', None) is not None:
# What to do with MaskedColumn with units: leave as MaskedColumn or
# turn into Quantity and drop mask? Assuming we have masking support
# in Quantity someday, let's drop the mask (consistent with legacy
# behavior) but issue a warning.
if isinstance(col, MaskedColumn) and np.any(col.mask):
warnings.warn("dropping mask in Quantity column '{}': "
"masked Quantity not supported".format(col.info.name))
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
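# Illustrative note (added comment, not in the original module): a structured
# numpy array can be stored in a Table as a single mixin column by viewing it
# as NdarrayMixin.
#
#     >>> arr = np.array([(1, 2.0), (3, 4.0)], dtype=[('i', 'i4'), ('f', 'f8')])
#     >>> t = Table([arr.view(NdarrayMixin)], names=['rec'])
#     >>> t['rec'][0]['f']   # 2.0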
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
| 37.760643
| 100
| 0.558006
|
f9309368d5b7bac11c6f7855bac99b2cec8d37db
| 9,331
|
py
|
Python
|
datasets/mutual_friends/mutual_friends.py
|
PierreColombo/datasets
|
c22ec7e64edb6596a6ff5894712dea4dc5441de8
|
[
"Apache-2.0"
] | 1
|
2021-07-29T06:28:28.000Z
|
2021-07-29T06:28:28.000Z
|
datasets/mutual_friends/mutual_friends.py
|
norabelrose/datasets
|
b0511c65b32d1103d34cb5ac9ffb50e9cf387843
|
[
"Apache-2.0"
] | null | null | null |
datasets/mutual_friends/mutual_friends.py
|
norabelrose/datasets
|
b0511c65b32d1103d34cb5ac9ffb50e9cf387843
|
[
"Apache-2.0"
] | 1
|
2021-05-04T16:54:23.000Z
|
2021-05-04T16:54:23.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mutual friends dataset."""
import json
import datasets
_CITATION = """\
@inproceedings{he-etal-2017-learning,
title = "Learning Symmetric Collaborative Dialogue Agents with Dynamic Knowledge Graph Embeddings",
author = "He, He and
Balakrishnan, Anusha and
Eric, Mihail and
Liang, Percy",
booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P17-1162",
doi = "10.18653/v1/P17-1162",
pages = "1766--1776",
abstract = "We study a \textit{symmetric collaborative dialogue} setting in which two agents, each with private knowledge, must strategically communicate to achieve a common goal. The open-ended dialogue state in this setting poses new challenges for existing dialogue systems. We collected a dataset of 11K human-human dialogues, which exhibits interesting lexical, semantic, and strategic elements. To model both structured knowledge and unstructured language, we propose a neural model with dynamic knowledge graph embeddings that evolve as the dialogue progresses. Automatic and human evaluations show that our model is both more effective at achieving the goal and more human-like than baseline neural and rule-based models.",
}
"""
_DESCRIPTION = """\
Our goal is to build systems that collaborate with people by exchanging
information through natural language and reasoning over a structured knowledge
base. In the MutualFriend task, two agents, A and B, each have a private
knowledge base, which contains a list of friends with multiple attributes
(e.g., name, school, major, etc.). The agents must chat with each other
to find their unique mutual friend."""
_HOMEPAGE = "https://stanfordnlp.github.io/cocoa/"
_LICENSE = "Unknown"
_URLs = {
"train": "https://worksheets.codalab.org/rest/bundles/0x09c73c9db1134621bcc827689c6c3c61/contents/blob/train.json",
"dev": "https://worksheets.codalab.org/rest/bundles/0x09c73c9db1134621bcc827689c6c3c61/contents/blob/dev.json",
"test": "https://worksheets.codalab.org/rest/bundles/0x09c73c9db1134621bcc827689c6c3c61/contents/blob/test.json",
}
class MutualFriends(datasets.GeneratorBasedBuilder):
"""Mutual Friends dataset."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=VERSION,
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"uuid": datasets.Value("string"),
"scenario_uuid": datasets.Value("string"),
"scenario_alphas": datasets.Sequence(datasets.Value("float32")),
"scenario_attributes": datasets.Sequence(
{
"unique": datasets.Value("bool_"),
"value_type": datasets.Value("string"),
"name": datasets.Value("string"),
}
),
"scenario_kbs": datasets.Sequence(
datasets.Sequence(
datasets.Sequence(
datasets.Sequence(datasets.Value("string")),
)
)
),
"agents": {
"1": datasets.Value("string"),
"0": datasets.Value("string"),
},
"outcome_reward": datasets.Value("int32"),
"events": {
"actions": datasets.Sequence(datasets.Value("string")),
"start_times": datasets.Sequence(datasets.Value("float32")),
"data_messages": datasets.Sequence(datasets.Value("string")),
"data_selects": datasets.Sequence(
{
"attributes": datasets.Sequence(datasets.Value("string")),
"values": datasets.Sequence(datasets.Value("string")),
}
),
"agents": datasets.Sequence(datasets.Value("int32")),
"times": datasets.Sequence(datasets.Value("float32")),
},
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_URLs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": data_dir["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": data_dir["test"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": data_dir["dev"],
},
),
]
def _generate_examples(self, filepath):
""" Yields examples. """
with open(filepath, encoding="utf-8") as f:
mutualfriends = json.load(f)
for id_, dialogue in enumerate(mutualfriends):
uuid = dialogue["uuid"]
scenario_uuid = dialogue["scenario_uuid"]
scenario = dialogue["scenario"]
# Note that scenario["uuid"] == scenario_uuid all the time in the data
scenario_alphas = scenario["alphas"]
scenario_attributes = scenario["attributes"]
scenario_kbs = [
[
[
list(person.keys()), # scenario_kbs_keys
list(person.values()), # scenario_kbs_values
]
for person in kb
]
for kb in scenario["kbs"]
] # The keys are not fixed, so "linearizing" the dictionaries
agents = dialogue["agents"]
outcome_reward = dialogue["outcome"]["reward"]
events_actions = []
events_start_times = []
events_data_messages = []
events_data_selects = []
events_agents = []
events_times = []
for turn in dialogue["events"]:
act = turn["action"]
events_actions.append(act)
events_start_times.append(-1 if turn["start_time"] is None else turn["start_time"])
# Note that turn["start_time"] == None in the data
if act == "message":
events_data_messages.append(turn["data"])
events_data_selects.append({"attributes": [], "values": []})
elif act == "select":
events_data_messages.append("")
events_data_selects.append(
{
"attributes": list(turn["data"].keys()),
"values": list(turn["data"].values()),
}
)
events_agents.append(turn["agent"])
events_times.append(turn["time"])
events = {
"actions": events_actions,
"start_times": events_start_times,
"data_messages": events_data_messages,
"data_selects": events_data_selects,
"agents": events_agents,
"times": events_times,
}
yield id_, {
"uuid": uuid,
"scenario_uuid": scenario_uuid,
"scenario_alphas": scenario_alphas,
"scenario_attributes": scenario_attributes,
"scenario_kbs": scenario_kbs,
"agents": agents,
"outcome_reward": outcome_reward,
"events": events,
}
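# Hypothetical usage sketch (added comment, not part of the dataset script): once
# the script is available to the `datasets` library it can be loaded with
# `load_dataset`; the dataset name below is an assumption based on the file path.
#
#     >>> from datasets import load_dataset
#     >>> ds = load_dataset("mutual_friends", "plain_text")
#     >>> ds["train"][0]["uuid"]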
| 43.199074
| 735
| 0.540778
|
35d5b29c275e3674834cf387edc54ae9566bf1ef
| 818
|
py
|
Python
|
nuke_stubs/nuke/nuke_classes/Precomp.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | 1
|
2022-01-12T01:29:16.000Z
|
2022-01-12T01:29:16.000Z
|
nuke_stubs/nuke/nuke_classes/Precomp.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
nuke_stubs/nuke/nuke_classes/Precomp.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
from numbers import Number
from typing import *
import nuke
from . import *
class Precomp(Group):
"""
"""
def __repr__(self, ):
"""
Return repr(self).
"""
return None
def __str__(self, ):
"""
Return str(self).
"""
return None
def __len__(self, ):
"""
Return len(self).
"""
return None
def __getitem__(self, key, ):
"""
Return self[key].
"""
return None
def reload(self,):
"""
self.reload() -> None
Precomp Node reload()
@return: None
"""
return None
def __init__(self, *args, **kwargs):
"""
Initialize self. See help(type(self)) for accurate signature.
"""
return None
| 17.404255
| 70
| 0.46577
|
7d8dcc2586e321b0a57b02df4374ac7729855a06
| 847
|
py
|
Python
|
sent/main/admin.py
|
jankanak/Tutorial-Based-Website-Django-
|
829a99016cce500f1c3fcd8fcaaae0dbfe0c5d8c
|
[
"bzip2-1.0.6"
] | null | null | null |
sent/main/admin.py
|
jankanak/Tutorial-Based-Website-Django-
|
829a99016cce500f1c3fcd8fcaaae0dbfe0c5d8c
|
[
"bzip2-1.0.6"
] | 8
|
2021-03-19T02:30:26.000Z
|
2022-01-13T02:36:54.000Z
|
sent/main/admin.py
|
jankanak/Tutorial-Based-Website-Django-
|
829a99016cce500f1c3fcd8fcaaae0dbfe0c5d8c
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.contrib import admin
from .models import Tutorial,TutorialCategory,TutorialSeries
from tinymce.widgets import TinyMCE
from django.db import models
# Register your models here.
# Sometimes not every field should be shown on the admin site; it is also
# useful to expose only a specific subset of a model's fields to the admin site.
class TutorialAdmin(admin.ModelAdmin):
fieldsets=[
("Title/date",{'fields':["tutorial_title","tutorial_published"]}),
("URL",{"fields":["tutorial_slug"]}),
("Series",{"fields":["tutorial_series"]}),
("content",{"fields":["tutorial_content"]})
]
formfield_overrides={
models.TextField:{'widget': TinyMCE() }
}
admin.site.register(TutorialCategory)
admin.site.register(TutorialSeries)
admin.site.register(Tutorial,TutorialAdmin)
| 32.576923
| 107
| 0.717828
|
970e090eb6f3ee7834a4937aa408edff7e63e6f8
| 2,511
|
py
|
Python
|
GeneticAlgorithm.py
|
jbrneto/heuristic-algoritms
|
0a97d942dc449f7936d5724f8af22a6266164e51
|
[
"MIT"
] | null | null | null |
GeneticAlgorithm.py
|
jbrneto/heuristic-algoritms
|
0a97d942dc449f7936d5724f8af22a6266164e51
|
[
"MIT"
] | null | null | null |
GeneticAlgorithm.py
|
jbrneto/heuristic-algoritms
|
0a97d942dc449f7936d5724f8af22a6266164e51
|
[
"MIT"
] | null | null | null |
import random
import bisect
class GA:
def __init__(self, population, fn_fitness, gene_pool):
self.population = population
self.fn_fitness = fn_fitness
self.gene_pool = gene_pool
self.fitnesses = [] # fitness for each individual in population
    self.fit_dist = [] # distribution of probability proportional to fitness
    self.roulette = None # sampler of individuals following the fitness distribution
def evolve(self, ngen=1000, pmut=0.1):
self.fit_population()
for _ in range(ngen):
new_population = []
for _ in range(len(self.population)):
p1, p2 = self.select(2)
child = self.recombine(p1, p2)
child = self.mutate(child, pmut)
new_population.append(child)
self.population = new_population
self.fit_population()
best = min(self.fitnesses)
return self.population[self.fitnesses.index(best)]
def select(self, r):
return [self.roulette() for i in range(r)] if r > 1 else self.roulette()
def selectUniform(self, r):
return [self.population[random.randrange(0, len(self.population))] for i in range(r)] if r > 1 else self.population[random.randrange(0, len(self.population))]
def recombine(self, x, y):
c = random.randrange(0, len(x))
return x[:c] + y[c:]
def mutate(self, x, pmut):
if random.uniform(0, 1) >= pmut:
return x
c = random.randrange(0, len(x))
r = random.randrange(0, len(self.gene_pool))
new_gene = self.gene_pool[r]
return x[:c] + [new_gene] + x[c+1:]
def fit_population(self):
self.fitnesses = list(map(lambda x: self.fn_fitness(x), self.population))
# flip roulette logic, the lower the better
total_fit = sum(self.fitnesses)
tmp_fit = list(map(lambda x: total_fit / x, self.fitnesses))
weight_dist = []
for w in tmp_fit:
weight_dist.append(w + weight_dist[-1] if weight_dist else w)
self.fit_dist = weight_dist
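    # Added comment (illustration): the roulette draws a uniform value in
    # [0, total_weight) and locates it in the cumulative weights with bisect, so
    # each individual is selected with probability proportional to its weight
    # (inverted fitness, since a lower raw fitness is better here).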
self.roulette = lambda: self.population[bisect.bisect(self.fit_dist, random.uniform(0, self.fit_dist[-1]))]
def fn_evaluate(array):
return sum([(i * x) for i, x in enumerate(array)])
population_size = 100
individual_size = 100
gene_pool = list(range(0, individual_size))
population = []
for _ in range(0, population_size):
population.append(random.sample(range(0, individual_size), individual_size))
ga = GA(
population=population,
fn_fitness=fn_evaluate,
gene_pool=gene_pool
)
solution = ga.evolve(ngen=1000, pmut=0.1)
print(solution)
print(fn_evaluate(solution))
| 31
| 162
| 0.685783
|
31cae82464458f7802925bdb63f9aceb3a50e135
| 7,380
|
py
|
Python
|
test/pytest/conftest.py
|
bitigchi/MuditaOS
|
425d23e454e09fd6ae274b00f8d19c57a577aa94
|
[
"BSL-1.0"
] | 1
|
2021-11-11T22:56:43.000Z
|
2021-11-11T22:56:43.000Z
|
test/pytest/conftest.py
|
bitigchi/MuditaOS
|
425d23e454e09fd6ae274b00f8d19c57a577aa94
|
[
"BSL-1.0"
] | null | null | null |
test/pytest/conftest.py
|
bitigchi/MuditaOS
|
425d23e454e09fd6ae274b00f8d19c57a577aa94
|
[
"BSL-1.0"
] | null | null | null |
# Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time
import pytest
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from harness import log
from harness.harness import Harness
from harness import utils
from harness.interface.error import TestError, Error
from harness.interface.CDCSerial import Keytype, CDCSerial as serial
from harness.interface.defs import key_codes
simulator_port = 'simulator'
def pytest_addoption(parser):
parser.addoption("--port", type=str, action="store", required=False)
parser.addoption("--timeout", type=int, action="store", default=15)
parser.addoption("--phone_number", type=int, action="store")
parser.addoption("--call_duration", type=int, action="store", default=30)
parser.addoption("--sms_text", type=str, action="store", default='')
parser.addoption("--bt_device", type=str, action="store", default='')
@pytest.fixture(scope='session')
def phone_number(request):
phone_number = request.config.option.phone_number
assert phone_number
return phone_number
@pytest.fixture(scope='session')
def call_duration(request):
call_duration = request.config.option.call_duration
assert call_duration
return call_duration
@pytest.fixture(scope='session')
def sms_text(request):
sms_text = request.config.option.sms_text
assert sms_text != ''
return sms_text
@pytest.fixture(scope='session')
def bt_device(request):
bt_device = request.config.option.bt_device
return bt_device
@pytest.fixture(scope='session')
def harness(request):
'''
Try to init one Pure phone with serial port path or automatically
'''
port_name = request.config.option.port
TIMEOUT = request.config.option.timeout
timeout_started = time.time()
RETRY_EVERY_SECONDS = 1.0
try:
if port_name is None:
log.warning("no port provided! trying automatic detection")
harness = None
with utils.Timeout.limit(seconds=TIMEOUT):
while not harness:
try:
harness = Harness.from_detect()
except TestError as e:
if e.get_error_code() == Error.PORT_NOT_FOUND:
log.info(f"waiting for a serial port… ({TIMEOUT- int(time.time() - timeout_started)})")
time.sleep(RETRY_EVERY_SECONDS)
else:
assert '/dev' in port_name or simulator_port in port_name
if simulator_port in port_name:
file = None
with utils.Timeout.limit(seconds=TIMEOUT):
while not file:
try:
file = open("/tmp/purephone_pts_name", "r")
except FileNotFoundError as err:
log.info(
f"waiting for a simulator port… ({TIMEOUT- int(time.time() - timeout_started)})")
time.sleep(RETRY_EVERY_SECONDS)
port_name = file.readline()
if port_name.isascii():
log.debug("found {} entry!".format(port_name))
else:
pytest.exit("not a valid sim pts entry!")
harness = Harness(port_name)
'''
Wait for endpoints to initialize
'''
testbody = {"ui": True, "getWindow": True}
result = None
with utils.Timeout.limit(seconds=305):
while not result:
try:
result = harness.endpoint_request("developerMode", "get", testbody)
except ValueError:
log.info("Endpoints not ready..")
except utils.Timeout:
pytest.exit("couldn't find any viable port. exiting")
else:
return harness
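# Illustrative invocation (added comment, not in the original file). The option
# names come from pytest_addoption above; the port path and number are assumptions:
#
#   pytest --port /dev/ttyACM0 --timeout 30 --phone_number 123456789 <test_file>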
@pytest.fixture(scope='session')
def harnesses():
'''
Automatically init at least two Pure phones
'''
found_pures = serial.find_Pures()
harnesses = [Harness(pure) for pure in found_pures]
if not len(harnesses) >= 2:
pytest.skip("At least two phones are needed for this test")
assert len(harnesses) >= 2
return harnesses
@pytest.fixture(scope='session')
def phone_unlocked(harness):
harness.unlock_phone()
assert not harness.is_phone_locked()
@pytest.fixture(scope='session')
def phone_locked(harness):
harness.lock_phone()
assert harness.is_phone_locked()
@pytest.fixture(scope='session')
def phones_unlocked(harnesses):
for harness in harnesses:
harness.unlock_phone()
assert not harness.is_phone_locked()
@pytest.fixture(scope='session')
def phone_in_desktop(harness):
# go to desktop
if harness.get_application_name() != "ApplicationDesktop":
harness.connection.send_key_code(key_codes["fnRight"], Keytype.long_press)
# in some cases we have to do it twice
if harness.get_application_name() != "ApplicationDesktop":
harness.connection.send_key_code(key_codes["fnRight"], Keytype.long_press)
# assert that we are in ApplicationDesktop
assert harness.get_application_name() == "ApplicationDesktop"
@pytest.fixture(scope='function')
def phone_ends_test_in_desktop(harness):
yield
target_application = "ApplicationDesktop"
target_window = "MainWindow"
log.info(f"returning to {target_window} of {target_application} ...")
time.sleep(1)
    if harness.get_application_name() != target_application:
        body = {"switchApplication": {"applicationName": target_application, "windowName": target_window}}
        harness.endpoint_request("developerMode", "put", body)
        time.sleep(1)
max_retry_counter = 5
while harness.get_application_name() != target_application:
max_retry_counter -= 1
if max_retry_counter == 0:
break
log.info(f"Not in {target_application}, {max_retry_counter} attempts left...")
time.sleep(1)
    else:
        # switching window in case ApplicationDesktop is not on MainWindow:
        body = {"switchWindow": {"applicationName": target_application, "windowName": target_window}}
        harness.endpoint_request("developerMode", "put", body)
        time.sleep(1)
# assert that we are in ApplicationDesktop
assert harness.get_application_name() == target_application
time.sleep(1)
def pytest_configure(config):
config.addinivalue_line("markers",
"service_desktop_test: mark test if it's related to service-desktop API")
config.addinivalue_line("markers",
"rt1051: mark test if it's target only (eg. calls, messages)")
config.addinivalue_line("markers",
"usb_cdc_echo: mark test if it's intended for usb-cdc echo mode")
config.addinivalue_line("markers",
"two_sim_cards: mark test in case when two sim cards are required")
config.addinivalue_line("markers",
"backup: subset of backup user data tests")
config.addinivalue_line("markers",
"restore: subset of restore user data tests")
| 36.35468
| 115
| 0.634417
|
569a8a9d6d5c3088bd587230f947e21846f2bc2f
| 511
|
py
|
Python
|
DEMO.py
|
renjunxiang/API_baidu_ai
|
ea784a7dceb9e34f7f61fa523a4985cf44bd0ec8
|
[
"MIT"
] | null | null | null |
DEMO.py
|
renjunxiang/API_baidu_ai
|
ea784a7dceb9e34f7f61fa523a4985cf44bd0ec8
|
[
"MIT"
] | null | null | null |
DEMO.py
|
renjunxiang/API_baidu_ai
|
ea784a7dceb9e34f7f61fa523a4985cf44bd0ec8
|
[
"MIT"
] | null | null | null |
from API_baidu import NLP_SDK, entity_annotation, face_detect
import os
# sample Chinese text (filing instructions for applications made by proxy),
# used as input for entity annotation
text = '委托他人申请的,还须提交委托书、委托代理人或指定代表身份证明(如身份证、外籍人员护照等)原件、复印件'
results = entity_annotation(text=text)
print(results)
# another Chinese sample (note on business-premises use certificates),
# run through the dependency parser of the NLP SDK
text = '本区办理的营业执照且登记档案中的场地使用证明仍在有效期内,可使用《关于经营场所合法使用证明材料的情况说明》'
result = NLP_SDK(text=text, method='depParser')
print(result)
base_path = os.path.dirname(__file__)
results = face_detect(image_path=base_path + '/API_baidu/picture/e1.jpg',
show=True,
savepath=None)
print(results)
| 30.058824
| 73
| 0.733855
|
0e083608bd6af9f6841f6c44947463f63f1ecc44
| 44,709
|
py
|
Python
|
segnlp/resources/am/ams.py
|
AxlAlm/SegNLP
|
89b8d077952397dfcea089376b373b117bcf6a65
|
[
"Apache-2.0"
] | 1
|
2021-01-21T17:16:55.000Z
|
2021-01-21T17:16:55.000Z
|
segnlp/resources/am/ams.py
|
AxlAlm/SegNLP
|
89b8d077952397dfcea089376b373b117bcf6a65
|
[
"Apache-2.0"
] | 2
|
2021-01-24T20:07:54.000Z
|
2021-01-26T16:59:28.000Z
|
segnlp/resources/am/ams.py
|
AxlAlm/SegNLP
|
89b8d077952397dfcea089376b373b117bcf6a65
|
[
"Apache-2.0"
] | 1
|
2021-01-21T17:16:57.000Z
|
2021-01-21T17:16:57.000Z
|
ams = [
"in short , i can say for certain that",
"to clarify ,",
"wherever possible ,",
"in my opinion , i agree with the second idea that",
"striking contrast ,",
"to give an example ,",
"some people believe that",
"first and foremost reason is that",
"inevitably ,",
"i think that a",
"they opine that",
"on the other hand , there are some people think that",
"in this essay , the reasons for why i agree that",
"for me",
"therefore , we can commence to agree that",
"i do n't deny that",
"despite of the above arguments , i still strongly believe that",
"i personally believe that",
"so , this example makes it obvious that",
"because ,",
", it should be emphasized that",
"i personally suppose that",
"to summarize , i strongly agree that",
"to wrap it all up ,",
"secondly , parents and schools should give priority on directing their kids about handwriting as",
"but i believe this is not the issue because",
"from my personal experience i recall that",
"nonetheless ,",
"however , as the society grows , human rights become more highly respected ,",
"above all ,",
"it is needless to say that",
"i believe that the",
"but on other hand ,",
"i personally believe that even when",
"at a higher level ,",
"those who disagree , believe that",
"but these young people do not take into account that",
"in conclusion , i reckon that",
"however",
"so i suggest",
"this seems to be easy but",
"on top of that ,",
"it is clear to recognize that",
"instead ,",
"because of the similarity i talked before ,",
"on one hand , i agree that",
"at least ,",
"to put it in a nutshell , the suggestion of",
"as hence",
", for me ,",
"for the above reasons , i agree that",
"in conclusion , the above stated reasons clearly outweigh the fact that",
"the soaring crime rate has led to the proposal that",
"as for the two integral components within the system , elementary and advanced education , there 's no doubt that a",
"in conclusion , my point of view is that , although",
"the main advantage is that",
", it is my belief that",
"in conclusion , i believe",
"first of all , some people claim that",
"lastly ,",
"hence , from this case , we are capable of stating that",
"for example , as we know ,",
"another issue is that in the ever - accelerated updating of science and technology ,",
"to sum up , it is clear that",
"taking into account of all these factors , we may reach the conclusion that",
"although some people argue that",
"to sum up , it makes no difference , people from young ages to old ages are free to make a decision , nonetheless sometimes to prevent some problems coming up for young individuals ,",
"from my point of view ,",
"what is worse ,",
"nevertheless , while i admit that",
"in my opinion ,",
"while i agree this will bring negative impacts on the younger generation ,",
"specifically ,",
"in terms of ethics ,",
"or , under many circumstances ,",
"by doing this ,",
"hence",
"next ,",
"first of all , it is true that",
"by way of conclusion , i once again restate my position that",
"from this example , it is obvious that",
"firstly ,",
"as far as i am concerned , i agree with that",
"on one hand ,",
"considering all the benefits ,",
"finally",
"thus , it is clear that",
"so that ,",
"relatively ,",
"in addition to this ,",
"it is not surprised that",
"i would maintain that",
", i claim that",
", but ,",
"first of all , it seems to be true that",
"while some people believe that",
"i always believe that a",
"there are many advantages of distance learning , but",
"adding to that ,",
"in conclusion , i personally agree with that",
"still ,",
"first of all",
"first of all , one of the most significant upsides is that",
"in conclusion , i totally believe that",
"while i accept that job can offer happiness to some people , i believe that",
"this is to say ,",
"besides the methods , from my point of view ,",
"in this case , i advocate the idea that",
", i completely argue that",
"besides , it is undeniable that",
"as a result",
"so , for example",
"for another ,",
"in short ,",
"i found that",
"in the first instance ,",
"firstly , i think that",
"according to the discuss above ,",
"however , from my perspective , i think",
"thus , i firmly believe that",
"alternatively ,",
"secondly , it is believed that",
"in a larger sense ,",
"there is no denying that",
"so the truth lies somewhere closer to the fact that a",
"to sum up , even when",
"considering this fact ,",
"in this condition , because",
"on the other hand , some people maintain that",
"to start ,",
"in conclusion , i feel certain that",
"and",
"most of the time ,",
"after analyzing smoke illegalization in public places in sri lanka ,",
"as we know ,",
"while most scientists claim that this is vital for improving human health , i believe that",
"for those reasons ,",
"thus , i would conclude that",
"in my opinion , there is a likelihood that",
"thirdly , i totally believe",
"nevertheless , opponents of modern communications claim that only",
"in my opinion , one of the main reasons for supporting this statement is that",
"in addition to that ,",
"in conclusion , from my point of view ,",
"however , i tend to believe",
"therefore ,",
"in my opinion , i agree that",
"in paticular ,",
"therefore , i believe",
"and in present ,",
"under this circumstance ,",
"in their opinion ,",
"moreover , the proponents of globalization idea point out",
"thus , in my point of view , although",
"but to me ,",
"in conclusion , even though",
"in my personal point of view ,",
"although some people claim that the advantages of advertising are outweigh the advantages , i believe that",
"however , i believe that",
"nonetheless , looking from another perspective ,",
"hence , i think",
"some people think that",
"in conclusion , after analysing the effects of world games both for countries at war and for organising countries ,",
"since ,",
"another point that should be taken into account is",
"or ,",
"from my perspective , i think",
"i mean ,",
"meanwhile , due to the fact that",
"in conclusion , in spite of some negative effects ,",
"without doubt ,",
"in addition , i believe that",
"in conclusion , i think that",
"as an example ,",
"to sum up , i think",
"in spite of bring fantastic advantages ,",
"from that",
"then ,",
"first and foremost , i believe that",
"the first reason is",
"apart from that ,",
"ultimately i feel that",
"as this shows ,",
"the main reason is that",
"first of all , there is no doubt that",
"despite this , it is undeniable that",
"even in some cases ,",
"it would be great if every zoo kept the animals in perfect conditions , but regardless of the condition ,",
"they further point out that",
"if i have to choose ,",
"to add up ,",
"besides , although",
"in conclusion , i admit that",
"together ,",
"firstly , the distinguishing fact of the",
"this , in turn ,",
"some people argue that",
"first , indeed",
"the third convincing reason is that",
"by considering this and being aware of that students have not any responsibility and other difficulty in comparison with a young man , we can infer that why",
"third , it is clear that",
"obviously",
"and therefore",
"as all of us know ,",
"but i tend to think that",
"; thus ,",
"nowaday ,",
"to sum up , i completely approve of the statement which says",
"nevertheless , with all the mentioned demerits ,",
"and consequently ,",
"because",
"this fortifies the fact that a",
"admittedly , on the other hands , there might be some dissidents proclaiming that",
"in actual ,",
", furthermore ,",
"with regard to the former ,",
"therefore , i will conclude that",
"in spite of many problems of having own business ,",
"first of all , although",
"it is widely thought that",
"then",
"so , i believe that",
"first , it is generally understood that",
"what 's even worse ,",
"accordingly ,",
"to sum thing up , i agree that",
", therefor",
"however , i strongly believe that",
"my personal view is that despite the emphasis of citizen safety ,",
"in my opinion , i strongly agree that",
"those activities have proven that",
"first , in my view",
"on the other hand , i believe",
"personally , i think",
"some opponents says that it is cruel to animals and nature , however , i believe that no sensible person will deny that",
"by way of conclusion , i believe that",
"in addition",
", so i think",
"eventually ,",
"it is clearly understood that",
"yet ,",
"despite the importance of working in team , i still believe that",
"when they know",
"to conclude , both options have their advantages , but personally i think",
"to sum up , it is evident for me that",
"in conclusion , i agree that",
"admittedly ,",
"due to the reasons given above ,",
", therefore",
"notable one of the shortcomings is that",
"in the meantime ,",
"on the one hand , it is clear that",
"recently , some people argue that",
"take myself for example ,",
"in conclusion , it is undeniable that",
"with this kind of power",
"as sport has become a common passion which passed over every national border ,",
"although there are some advantages to learning a foreign language in its original country , i firmly believe that",
"in conclusion , there is to say that , given the arguments above ,",
"it is true while",
"i completely agree with the idea that",
"but i believe that",
"to quote an instance ,",
"they argue that",
"but as far as i am concerned , i agree with the idea , which is that",
"in any case ,",
"i favor the former ; that is ,",
"for example , because",
"based on the reasons demonstrate above , i believe that",
"my view is that the",
"; however , i believe",
"on the other hand , i believe that",
"it is widely acknowledged that",
"instead",
"some people , however , still believe that they can exist for long time ; others disagree , arguing that",
"i m my opinion ,",
"even though",
"as the world 's situation is always changing ,",
"the message is",
"further more ,",
"futhermore ,",
"finally , i believe that",
"as far as i am concerned , i agree with the view that",
"however , some say that",
"as far as i am concerned , i think",
", thereby",
"on the one hand , i agree that a",
"some people claim that",
", to find that why",
"in the ways that",
"it is no doubt that the",
"from my point of view , i am in favor the former statement that",
"although multitudes of people think that knowledge of ideas and concepts is more vital for students , i am in the conviction that",
"taking everything into account , i strongly believe that",
"however , i am sure that",
"as such ,",
"however i strongly believe that",
", however ,",
"i favor the latter ; that is ,",
"however , from my perspective ,",
"on the other hand , it is no doubt that",
"by way of conclusion , it is true that",
"however , i agree that",
"; therefore , i believe",
"i agree with the statement that",
"this is because",
"to me ,",
"the first and foremost reason is that",
"regardless of some exceptions ,",
"finally ,",
"after careful consideration of both sides of the argument ,",
"secondly ,",
"in summary , even though",
"last but not least ,",
"i really believe that",
"undoubtedly ,",
"as you can see ,",
"it is true that",
"they argue that people ,",
"people who hold different opinion may argue that",
"despite all these positive aspects , i think that",
"another reason is that",
"in my views , i agree that",
"on other side of the coin , many people believe that",
"as a practical epitome ,",
", eating , etc . , but",
"however , to my mind",
"i belive this , due to the fact that",
"also ,",
"that 's not enough ,",
"frequently ,",
"after all ,",
"in my personal experience ,",
", i am inclined to believe that",
"therefore , i would conclude that a",
"in my personal opinion , i am totally agreed with the view that",
"in the same way ,",
"the long and the short of it ,",
"i agree that nowadays the learning process benefits greatly from computers , but",
"for this reason ,",
"secondly , students should not be required to attend classes because",
"in contrast ,",
"all in all ,",
"from my viewpoint ,",
"in a nutshell , varied characteristics of various people , feeling tired of having the same occupation and solving the unemployed problem drive me to agree with the statement that",
"as a fact that",
"this explains why",
"for instance",
"however , on the other side of the coin are voices in the opposition , saying that",
"as example",
", due to the fact that",
"nevertheless , it is undeniable that",
"all in all , from discussed reasons , we can conclude that",
"finally , beside all above mentioned items ,",
"first of all , i find the excitement of big cities to be attractive ,",
"it 's undeniable that",
"in such circumstances ,",
"on balance ,",
"in conclusion , though",
"although modern technology make it possible , i still believe",
"from reasons listed above , we can safely draw the conclusion that ,",
"although some people think that the major part of school schedule should be entirely devoted to academics , i truly believe that",
"to begin with , it is undeniable that",
"personally , i tend to agree with the point of view that",
"on the other hand , there are , however , people who think that",
"to be specific ,",
"there are people who believe that",
"however , despite the drawback mentioned above ,",
"despite the argument of some people that",
"individually ,",
"by which i mean ,",
"by contrast ,",
"also , i think",
"however , in my view ,",
"therefore in my view ,",
"most of all ,",
", i think they are wrong , since",
"is , primarily",
"to picture it ,",
"for another example ,",
", it is also true that",
"it is especially true if we consider the fact that a",
"therefore , it can be said that",
"collectively ,",
"to sumarize ,",
", for instance ,",
"taken all together ,",
"it is a matter of fact that",
"i would argue that",
"at times ,",
"apart from the one i have mentioned above , another equally important aspect is",
"however , as far as i am concerned , i strongly agree with the view that",
"however , according to my experiences ,",
", so i believe that",
"finally , i think that",
"last i want to mention is that even though",
"obviousness ,",
"firstly , it is an undeniable fact that",
"many employers argue that",
"many people find that",
"all in all , it is obvious that",
"in summary , although",
"in my personal opinion ,",
"to set another example ,",
"apart from saving a lot of money ,",
"however , from my point of view ,",
"at the end ,",
"i belive that",
"in conclusion , from the above views , although",
"meanwhile ,",
"so",
", and as a result ,",
"they think that",
"; so",
"to illustrate ,",
", but actually",
"according to a recent survey ,",
"based on the above discussion , i agree with that",
"for the above reasons and examples , it could be widely thought that",
"in conclusion as promising as renewable resources of energy sound ,",
"on the whole , i do believe that",
"nobody has a life with a lot of fun ;",
"in conclusion , it seems to me that",
"so ,",
"not to mention , that",
"besides that ,",
"thus , it is apparent that",
"during our trip ,",
"however , i am convinced that",
"thus , it is argued that",
"i strongly believe that",
"for these reasons ,",
"it can be said that",
"personally , i think that",
"finally , some assert that",
"it is clear that",
"however ,",
"i agree",
"i am totally convinced that",
"nevertheless , it is not unreasonable that some people think that",
"secondly , there are clear evidences that",
"another important point is that",
"to conclude this ,",
"furthermore ,",
"first , critics of zoology argue that",
"similarly ,",
"even so , i strongly believe that",
"to conclude ,",
"for instant ,",
"some people spot out that",
"another reason to fortify my opinion is that",
", since",
"besides ,",
"to sum up , i would like to say that for me both options of communicating are possible , but if this is an important and not urgent question",
"when it comes to the question whether classmates or colleagues should work on projects in person or by email , i am of the view that",
"one other important factor is",
"i agree with that idea that",
"i firmly believe that",
"all advantages considered ,",
"what is meant by this is that",
"but",
"it is widely known that",
"for instance ,",
"; in addition ,",
"in considering whether one should choose a job similar to his or her parents ' , i have mentioned a few reasons that",
"another reason ,",
"first of all , many of us would agree to the fact that",
"as a result of this ,",
"third , some parents are afraid that",
"hence , i agree with the majority , because",
"take an example ,",
"in conclusion ,",
"importantly ,",
"apparently",
"that 's why i believe that",
"although some people believe that students should spend the whole day on academic studies , i nevertheless believe that",
"some may argue that as time is different ,",
"this means , that",
"therefore , e",
"as studies have shown that",
"of course ,",
"according to this opinion ,",
", not to mention",
"finally , of course ,",
"in other word ,",
"increasingly however ,",
"concerning that",
"secondly , in my mind ,",
", so",
"thereafter ,",
"each year ,",
"today ,",
"what is more ,",
"however , i think",
"in this way ,",
"another reason why artists should be helped is because",
"for the reasons mentioned above ,",
"first of all , since",
"hence , it is clear that",
"with this situation , some people believe that",
"the first reason is that",
"of course",
"consequently , i think that",
"thereby",
"these are reasons that why",
"this is simply because",
"although",
"drive me to believe that",
"so that",
"however , the fact is that",
"the fact is that",
"it is also worth mentioning that",
", but we should also bear in mind that",
"another example is that",
"in conclusion , as a young women",
"from my point of view",
"in conclusion , i certainly can say that",
"hence , it is evident that",
"to begin with , i agree that",
"even",
", i support that",
"on the other hand",
"by contrast , some experts believe that",
"all in a nutshell ,",
"in spite of the fact",
"thus it is very obvious that",
"to put it all in a nutshell , i pen out saying that",
"environment ,",
"this issue is a controversial one , but in my opinion a closer examination reveals that",
"they point out that",
"to some extent ,",
"more importantly ,",
"moreover , it seems to me that",
"therefore , i agree that",
"from given the evidences , it seems to me that it becomes hard for students to choose between their living options since",
"nevertheless ,",
"in the other hand ,",
"in a word ,",
"i think that",
"in fact ,",
"in my viewpoint ,",
", and , furthermore ,",
"; thus",
"in conclusion , i admittedly and strongly agree that",
"the issue is controversial but in my opinion ,",
"in addition to the previous point ,",
"in conclusion , i completely agree that",
"i strongly disagree with this affirmation because i believe",
"furthermore",
"on the other hand there are people who say that",
"it is obvious that",
"but in fact ,",
"this is probably because",
"however , in my perspective ,",
"some people may argue that",
"i think , however ,",
", and this means that",
"ever since",
"after that i understood ,",
"taking all into account , i think that",
"in my opinion , however ,",
"to sum up , i would say that",
"third ,",
"in order to tackle the problem ,",
"another reason should be mentioned is that",
"the main reason for this issue is that",
"it is because",
"first and foremost ,",
"in conclusion , despite the contribution of it to the society ,",
"although miscellaneous people may apply diverse strategies , such as reading , watching movies , or working in their gardens , i argue that",
"however , as",
"the first reason why",
"in opposite ,",
"in summary ,",
"not only that ,",
"another supporting reason is that",
"i would not go along with their viewpoint as",
"therefore , in my opinion , i accede with notion that",
"there can be no doubt that",
", it is because",
"while some argue that the budget allocation to the arts is waste of national resources and the money should instead be spent on public services , i contend that",
"nevertheless , i think",
"it is out of of the questioned that",
"from then",
"overall , i believe that",
", thus ,",
"because of all the reasons mentioned above , namely",
"unfortunately",
"another function that",
"nowadays ,",
"needless to say ,",
"it is hard to judge which generation 's problems are more difficult but",
"the first and foremost reason lies in an inevitable fact that",
"this point has some merits on the surface ; however ,",
"admittedly , in concerning the social relationship with peers , some may support that",
"however , the other side of the coin is that ,",
"also",
"admittedly , to some extent ,",
"to conclude , although",
"however , despite the fact that",
"many people agree that",
"; while on the contrary ,",
"to sum up , i am strongly convinced that",
"first , as you can see that",
"instead of this ,",
"on top of this ,",
"in concluding ,",
"equally important ,",
"but ,",
"to sum up , i reaffirm that although",
"it ’s undisputed that",
"in spite of the importance of sports activities ,",
"this clearly shows that",
"in this view ,",
"however , in my opinion ,",
"actually ,",
"some people thinks that",
"as far as i am concerned , i agree with the opinion",
"as illustrated before ,",
"and thus",
"otherwise , another vital reason why",
"by way of conclusion , it is my belief that",
", while",
"i , for one ,",
"that is to say ,",
"i admit that",
"in the end ,",
"in my opinion though ,",
"many people hold the opinion that",
"the first important point that i want to make is that",
", for",
"this is due to the fact that ,",
", not only because",
"although it is true that",
"however , while",
"to sum up , i would maintain that",
"first of all , it is seen that",
"so obviously ,",
"we can see now that",
"from my prospect , i believe that",
"on the other hand , the advocators believe that",
"i think",
"all in all , i suppose that",
"in conclusion , i strongly believe that",
"however , in my point of view ,",
"some people might say i am silly , but i think",
"another point is that ,",
"although multitudes of people think that sports and social activities are not as necessary as academic subjects in colleges , i think that",
"for these reasons , i agree that",
"as a matter of fact ,",
"although i conceded",
"what 's more ,",
"after analyzing both points of view ,",
"in the first place , there is no doubt that",
"that is how ,",
"as can be seen from this , is that a",
"because of that ,",
"in my view ,",
"in conclusion , it is convincing that",
"another perspective ,",
"for the reasons mentioned above , i believe that",
"while critics castigating about the exorbitant use of oil and other insisting to charged drivers with extra fee during the rush hours for their selfish conducts , i hold the contention that",
"first of all , as i mentioned above",
"although there are good arguments in favour of this trend should be encouraged , i personally think that",
"because i strongly feel that",
"in the past few decades ,",
"as a token of goodwill ,",
"however , i still agree that",
"in conclusion , i agree with that",
"personally , i believe",
"responsible because",
"as a result , i believe that",
"beside ,",
"however , i strongly agree with that",
"it is my personal belief that",
"while some people might think that this international tourism has negative effects on the destination countries , i would contend that",
"to illustrate my point ,",
"; therefore ,",
"although it is real in some aspects , i believe that",
", but i strongly believe",
"as far as my opinion is concerned ,",
"however it is an obvious fact that",
"in the end , i think",
"i want to say that",
"as a lover of travel ,",
"instead , the real causes of traffic jam may fall on various of grounds , and therefore ,",
"however , some people might argue that",
"with those arguments in mind , i would conclude that",
"especially ,",
"thirdly , at some point ,",
"most important of all ,",
"in a nutshell , i am convinced that",
"another thing that put big cities in front of small towns is",
"in the contrary ,",
"and as a result",
"this is only partially true ,",
"even so ,",
"in conclusion , i am firmly supporting the view , that",
"in conclusion , i personally believe that",
"in this case ,",
"more specifically ,",
", thus",
", i certainly believe that",
"i have observed that",
"to be more specific ,",
"while some critics may argue that ,",
"many parents supporting for the idea of tax discounts claim that",
"that ’s why",
"in conclusion , while",
"secondary ,",
"in a nutshell , i believe that",
"truthfully ,",
"in these cases ,",
"to add to it ,",
"i however , agree to this only to a certain extent and believe that",
"as we all know ,",
"by the way ,",
"admittedly , on the other hand , there might be some dissidents proclaiming that",
"moreover , another equally crucial aspect is that",
"secondly , it is obvious that",
"apart from the one i have mentioned above , another equally crucible aspect is that",
"i agree with this opinion to some extent , but",
"for the above reasons , i believe that",
"i believe that",
"despite friendship is one of the most important values in a man 's life , people might have different preferences : some of them prefer to spend time with one or two close friends , while",
"for one ,",
"to sum up , i would have to say that although",
"in conclusion , of course ,",
"from my perspective , i agree with the view that",
"after having considered all the problems that i have discussed above , we can finally draw a conclusion that although",
"this is of paramount importance so that",
"the second reason why",
", also",
"for me , i believe",
"in a positive point of view ,",
"therefore this proves that",
"with the rapid developing pace ,",
"many people claim that",
"in facts ,",
"consequently ,",
"but we have to realize the fact :",
"to cite an example ,",
"time and again ,",
"looking from another perspective ,",
"this example shows that",
"all in all , that",
"all in all , i agree that",
"that is because ,",
"the history shows that",
"all thing considered ,",
"first and foremost , i truly believe that",
", what 's more ,",
"simply put ,",
"by considering all reasons ,",
"while",
"as for me ,",
"at this moment ,",
"critics may argue that",
"from all analysis suggested above ,",
"on the basis of the points mentioned above , i convinced that",
"from my perspective ,",
"nonetheless , i believe that",
"it is generally believed that ,",
"no doubt ,",
"to conclude , i definitely feel that",
"for",
"alike ,",
"if students want to get used to the society quickly ,",
"in another word ,",
"these examples make it obvious that",
"to sum up , i admire my father 's choice , while",
", i think",
"the other reason is that",
"in this sense ,",
"in conclusion , i firmly believe that",
"therefore , i would conclude that",
"to be precise , for instance ,",
"in conclusion , i would concede that",
"obviously ,",
"the outcome is that",
"some people think that a",
", that 's why",
"averagely ,",
"although some argue that a",
"from this experience , i believe that a",
"from my point of view , i believe that",
"futuermore ,",
"second , i think that a",
"to begin with , the major cause that leads to urban migration of young adults is that",
"for example",
"other people say that",
", i firmly object to it because",
"; also ,",
"apart from this ,",
"besides easing tensions ,",
"following both the point of views ,",
"in tradition ,",
"second ,",
"lastly , the most important reason i support this idea is",
"in this case , i would definitely agree that",
"one benefit is that",
"the first reason why people are willing to visit museums when they travel to a new territory is that",
"to conclude , i want to say that",
"also , it is obvious that ,",
"here is an example :",
"secondly , even though",
"however , despite the advantages of small town life ,",
"admittedly",
"as",
"in conclusion , although",
"as a consequence ,",
"the reason is that",
"indeed ,",
"in order to solve the education manpower problem ,",
"finally , although",
"in other words ,",
"although i accept that students have to study many subjects in school ,",
", and as a result",
"we can not deny that",
"further ,",
"in general ,",
"furthermore , even though some customers do not worry about the threat that second - hand smoke imposes on them ,",
"however , the opponents believe that",
"to sum up ,",
"to begin with ,",
"since",
"in addition to controlling emission carbon dioxide ,",
"on the contrary ,",
"therefore , to conclude my opinion on this statement , i want to mentioned that",
"it is obvious axiomatic that",
"personally , i believe that",
"on the other side ,",
"to sum it up , from above mentioned facts it can easily be deduced that",
"from the point of the local community ,",
"nevertheless , opponents of online - degrees would argue that",
"to some extent , i do not agree with this assertion because i believe that",
"consequently , no matter from the view of individual development or the relationship between competition and cooperation we can receive the same conclusion that a",
"by doing so ,",
"the result shows that",
"we must acknowledge that",
"hence it is always said that",
", so that",
"little wonder , then , that",
"first , it 's true",
"but like i said ,",
"other than that ,",
"last but not least , nowadays ,",
"that is ,",
"from another point of view ,",
"as the impacts for those pollution and traffic issues are extremely far - reaching ,",
"worse still ,",
"in conclusion , i strongly agree that",
"my opinion based on the reasons that every tough",
"at the same time ,",
"on the one hand ,",
"for another thing ,",
"this means that the",
"as i have said ,",
"admittedly , opponents may blame that",
"to conclude , as far as i am concerned ,",
"however , in order to tackle this problem more effectively ,",
", i would contend that",
"on the whole ,",
"in conclusion , i agree with this statement that",
"hence , from this case we are capable of stating that",
"in the second place ,",
"in my opinion",
"however , because",
", because",
"generally speaking , i definitely agree with the statement that",
"initially , if we look into the fact ,",
"in the conclusion , i would like to say that",
"in the event of an argument ,",
", and to achieve this ,",
"despite the fact that",
"thus this makes it clear that",
"it is believed that",
"everyone agrees that",
"there are few of those who are concerned think the current practice should be altered and that",
"nevertheless , i feel that",
"i will establish my view by analysing how",
"certainly",
"as far as i am concerned , i strongly believe that",
"nowadays , more and more people begin to select prepared food as their daily meals , since",
"however , i would argue that",
"in addition , although",
"the best example is",
"clearly ,",
"they may say that",
"there are several reasons why",
"therefore , i believe that",
"some people claim",
"through this way ,",
"in this regard",
"it is a debatable subject that",
"another significant factor of universities is that",
"fortunately ,",
"not only because",
"because , at least for him ,",
"from this ,",
"last ,",
"based on these grounds , it seems that",
"based on the reasons demonstrated above ,",
"first of all , as we all know ,",
"thus",
"this means that",
"thus , i strongly believe",
"on the other hand , some people oppose the argument , saying",
"in order to protect animals , from my perspective ,",
"i absolutely agree that",
"that ’s as important thing because",
", even though",
"finally yet importantly ,",
"for the same reason ,",
"some people say :",
"it is undeniable that",
"at this point ,",
"furthermore / moreover ,",
", i firmly believe that",
"especially",
"to be precise ,",
"however , someone may maintain that",
", and",
"therefore , to make use of time ,",
"for the above - mentioned reasons ,",
", therefore ,",
"firstly , the reason why university students should not be required to attend class is",
"some",
"on most occasions ,",
"those who advocate focusing on one subject believe that",
"having said that",
"in my opinion , i believe",
"this is because ,",
"secondly",
"in sum ,",
"the reason why most people oppose to zoos is that",
"personally ,",
"admittedly , on the other hand , there might be some people proclaiming that",
"by way of conclusion , i do not root for the idea that restrictions should be put on artists work ; on the contrary ,",
"this means ,",
"in order to solve these problems ,",
", although",
"in sum , though some people may disagree with me , as i explained my reasons and examples above , none of those factors are important and sufficient enough by themselves and",
"that 's why",
"due to this neglect ,",
"it is undoubted that",
", like my mom , i still hold the firm view that",
"it is sure that",
"the best choice is obviously stand on my side , which is that a",
"to conclude , i believe",
"another reason that makes learning by doing things more advantageous than learning by reading about things or listening to people talking about is that",
"to my view , i incline to believe",
"every day ,",
"to sum up , i believe that a",
"but from my experience ,",
"central to the supporting arguments of this issue is the idea that",
"second , i think",
"nevertheless , there are some other causes why",
"nevertheless , i assert that",
"though",
"to illustrate this further ,",
"few individuals assume that",
"and i believe",
"all in all , i am in the conviction that",
"moreover",
"for this point , they all think that",
"thus , i think",
"in my experience ,",
"due to the fact that",
"there is no doubt that",
"in conclusion , i strongly hold my point that",
"in light of the above - mentioned facts , one can easily conclude that",
"because of",
"although ,",
"to solve these issues ,",
"; moreover ,",
"to start with ,",
"so , from what have been discussed above , i strongly affirm the conclusion that",
"due to",
", in my opinion",
"economically speaking ,",
"that 's the main reason",
"most importantly ,",
"the first and foremost reason lies in the inevitable fact that",
", nevertheless",
"first of all , i do support the idea that",
"first of all , many people think that",
"as you can imagine ,",
", but also",
"another reason is that ,",
"unfortunately ,",
"all things considered ,",
"first , i think",
"primary ,",
"after analyzing these two points of view , it is believed that",
"personally i think ,",
"i agree with both opinions that",
"nevertheless , for the reasons that i have presented above ,",
"however , i would contend that",
"thirdly ,",
"nevertheless , i believe that",
"hence , from this case ,",
"as we can see",
"additionally ,",
"in addition to what mentioned ,",
"therefore",
"according to my opinion , because",
"because not only do",
"those who feel that",
"to sum up , although",
"as a result ,",
"many people believe that",
"for the primary concern ,",
", and accordingly ,",
", but i think",
"in conclusion , due to the fact that",
"to make it convenient ,",
"nevertheless , since",
"yet because",
"now that ,",
"to put it in a nutshell , i pen down saying that ,",
"although some people argue these developments are making our life comfortable , but",
"totally ,",
"that",
"from their point of view , the fact is that",
"despite the importance of helping those beyond our national borders ,",
"in this context ,",
"as far as i am concerned , i agree with the statement that",
"it is felt by many that",
"plus",
"as you see ,",
"to begin with , in schools ,",
", possibly because",
"i agree to the argument that",
"although many people prefer communicating by telephone or face - to - face , personally i believe that",
"firstly , it is undoubtedly that",
"however , i do think that",
"in fact",
"simultaneously ,",
"following the demonstration of a computer 's contribution in children 's study and building skills which are to be used in their career afterwards , it is agreed that",
"although both companies have their advantages and disadvantages ,",
"however , i am utterly convinced that",
"for me ,",
"i agree that",
"this example makes it clear that",
"lies in the fact that",
"they believe that",
"this argument may be true that",
"and finally ,",
"given these evidences , it can be seen that",
"it is clear to me that",
"as a conclusion , i personally think that",
"in sum , sometimes",
"undoubtedly",
"some people who hold different viewpoints may advocate that",
"for the above reasons ,",
"in a nutshell ,",
"to conclude , i would say that",
"in a word , although each of these methods have their pros and cons , i am pretty sure that",
"at the second point ,",
"to sum up , in spite of the fact that",
"first of all , the ancient latin proverb says : ' anima sana in corpore sano ' , which means that",
"to begin with , it is vital that",
"it seems that",
"because of that need ,",
"the reason for this is",
"to my mind ,",
", but in my own view ,",
"and then",
"the reason is that ,",
"nevertheless , i agree to this solution only to a certain degree because i feel that",
"for example ,",
"but i think",
"along with that ,",
"therefore , we can start to agree that",
"as well as",
"whereas ,",
"after considering the level of freedom , the studying conditions and the economic factor , if i had to choose ,",
"at first ,",
"therefore , i agree with this notion that",
"there is no deny that",
"in addition , the learning with a teacher makes us bear more burdens of studies cause they assign us homework , nevertheless ,",
"basically ,",
"it is certainly clear that",
"moreover , i have always believed that",
"one reason is that",
"thus ,",
", firmly believe that",
"to clarify my point i would like to say a memory from my childhood ,",
"i am a kind of person who believes to",
"to summarize ,",
"even though the purposes could be various ,",
"; therefore",
"however , i believe ,",
"it was said that",
"despite the convenience from up - to - date facilities ,",
"in my point of view , though",
"all in all , i am in the belief that",
"this is understandable",
"first ,",
"besides , it is obvious that",
", it is convincing that",
"all in all , i would maintain that",
"in condition ,",
"first of all ,",
"to conclude , no matter how fast and convenient modern devices bring to us in writing ,",
"based on the reasons demonstrated above , i believe that",
"in conclusion , it is my belief that",
"from this point of view , i firmly believe that",
"in the sense that",
"apparently ,",
"if you have a great love with football and your home team ,",
"in conclusion , i can certainly state that",
"in conclusion , after analyzing the pros and cons of advertising , both of the views have strong support , but it is felt that",
"interestingly ,",
"hence ,",
"except these ,",
"i believe",
"as it can be seen from the example ,",
"and as result ,",
", nevertheless ,",
"some people say that",
"first of all , it is recognized that",
"thus , i believe that",
", i can not help but assure that this is a matter of personal taste , to be clear ,",
"so i think",
"in result",
"however , even with all the conveniences , we would have to admit that",
"in short , although both sides have their equally valid arguments , i am inclined to think that although",
"however , we should bear in mind that",
"at this circumstances ,",
"nevertheless , supporters would argue that",
"as evidence of this ,",
"in conclusion , despite the fact that",
"in conclusion , in my view",
"clearly , although",
"in conclusion , i personally agree with statement that",
"overemphasizing on personal lives of famous people by media as a mass media majoring student , i think that it 's true that",
"people who hold different opinion may argue that a",
"further and even more importantly ,",
"ultimately ,",
", but i think that",
"from my point of view although",
"for example :",
"automatically ,",
", but",
"should be encouraged , i personally think that",
"although renewable resources of energy are elements of the current and future energy strategy , as far as i 'm concerned , these",
"in conclusion , i believe that",
", but on the other hand",
"furthermore , it 's undeniable that the",
"based on my arguments above , i think",
"that is the reason why",
"in the past time ,",
", too , but i imagine",
", i would state that",
"from the discussed above , we can easily draw the conclusion that",
"therefore , it seems that",
"after that ,",
"considering all those factors above ,",
"the reason why i think",
", it seem to me that",
"the fact that",
"therefore , here goes the point that",
"i completely agree that",
"as a case in point ,",
"in sum , i believe that",
"animal protectionists may base their argument on the ground that",
"another important thing to consider is that",
"in conclusion , given the reasons described above , when the advantages and disadvantages of whether there will be fewer cars in use in twenty years or not are carefully compared , the best choice is obviously stand on my side , which is that",
", for example ,",
"in such case ,",
"in my view , i agree to that fact that",
"considering all the above mentioned arguments , from my point of view",
"if someday i have children , i ’m absolutely sure that",
"while some might think",
"as the example above indicates ,",
"although those above reasons are acceptable , i would argue that",
"unlike classroom ,",
"however , i am inclined to believe that",
"in the first place ,",
"while both positions seem reasonable , my view is that ,",
"in conclusion , i would argue that",
"overall ,",
"second of all ,",
"in my opinion , i agree with the latter because",
"in conclusion , i concede that",
"i confess that",
"in my opinion , i think",
"as for the latter point ,",
"an even more detrimental fact is that",
"despite these problems , personally ,",
"based on the factors i have mentioned above , a conclusion can be drawn that although",
"it is agreed that",
"it does not seem unreasonable to suggest that",
"initially ,",
"this saying is stating that",
"in the near future ,",
"in my point of view ,",
"in result ,",
"in our hectic life , however ,",
"to conclude , i strongly believe that",
"in conclusion , as i believe that consumers should consider major needs when they purchase goods ,",
"i do believe that",
"i support the points of those who say that",
"last , but not least ,",
"in conclusion , i strongly agree with notion that",
"firstly , some people suggest that",
"in a nutshell , although",
"therefore i strongly agree with the argument that",
"alternatively i believe that",
"in other words , i think",
"ergo ,",
"some people might argue that",
"and hence",
"another significant fact which should be taken into consideration is that",
"this is an important factor because",
"in conclusion , i strongly feel that",
"what 's worse ,",
"the consequence of this case made",
"on the other hand ,",
"as we all know",
"while mother 's role is essential in a child 's life , i completely agree with the idea that",
"something else that is good is that",
"another reason is that a",
"primarily ,",
"to give a brief conclusion ,",
"as far as i am concerned ,",
"on the whole , to compare to all ways of communication , i really think that",
"therefore , i strongly agree that",
"generally , personally speaking , i believe",
"i have an impression that",
"in my opinion , i believe that",
"from my prospective ,",
"moreover ,",
"it is a well - known fact that",
"thus to conclude , it can be said that",
"it is widely seen that",
"i also believe",
"in addition , because",
"to conclude , it is now evident that",
"this means",
"however , i believe",
"personally speaking ,",
"in my opinion , though",
"; however ,",
"as a rule of thumb ,",
", so their",
"for them ,",
", as",
", hence",
"for one thing ,",
"to start with , since",
"otherwise ,",
", but i agree that",
"more ,",
"in my view point ,",
"this will be proven by analyzing how",
"the main reason why i believe artists should be funded is because",
"in sum , although each choice - few close friends or a large number of friends - presents its advantages and its weak points , i think that",
"in addition ,",
"while there is belief that technology advances means a lost in traditional cultures , i would argue that although",
"second , from an economic point of view , people often argue that",
"in conclusion , while there",
"one can not deny the fact that",
"it is certainly true that",
"my view is that",
"and i think",
"in conclusion , i think",
"in my perspective ,",
"they will argue ,",
"; consequently ,",
"with the economy development ,",
"mentioned to societies life style ,",
"i totally agree that",
"personally , i do not agree the viewpoint , because",
"in coclusion ,",
"so , i definitely believe that",
"i am biased towards the opposite side that",
]
| 34.901639
| 245
| 0.705876
|
c68e2402d54239fcf4564cf51d2c786f6c8596e0
| 3,464
|
py
|
Python
|
ruebot/ruebDB.py
|
krippix/ruebot.py
|
fdd50ee212fddf08e73d546fb287071bcb16b05c
|
[
"MIT"
] | null | null | null |
ruebot/ruebDB.py
|
krippix/ruebot.py
|
fdd50ee212fddf08e73d546fb287071bcb16b05c
|
[
"MIT"
] | null | null | null |
ruebot/ruebDB.py
|
krippix/ruebot.py
|
fdd50ee212fddf08e73d546fb287071bcb16b05c
|
[
"MIT"
] | null | null | null |
import psycopg2
from ruebot import config
import logging
class ruebDatabaseError(Exception):
pass
def dbrequest(sqlstatement, user_input):
""" Connect to the PostgreSQL database server """
    # example: dbrequest('SELECT id FROM users WHERE displayname=%s', 'krippix')
conn = None
try:
# read connection parameters
params = config.databaseconfig()
# connect to the PostgreSQL server
#logging.debug('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
        # execute a statement; psycopg2 expects a sequence of parameters,
        # so a single bare value is wrapped into a one-element tuple
        if not isinstance(user_input, (tuple, list)):
            user_input = (user_input,)
        cur.execute(sqlstatement, user_input)
#get one of the dbresults
answer = cur.fetchone()
# close the communication with the PostgreSQL
cur.close()
return answer
except (Exception, psycopg2.DatabaseError) as error:
logging.error(error)
raise ruebDatabaseError(error)
finally:
if conn is not None:
conn.close()
#logging.debug('dbrequest: Database connection closed.')
def dbfetchall(sqlstatement, user_input):
""" Connect to the PostgreSQL database server """
    # example: dbfetchall('SELECT id FROM users WHERE displayname=%s', 'krippix')
conn = None
try:
# read connection parameters
params = config.databaseconfig()
# connect to the PostgreSQL server
#logging.debug('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
        # execute a statement; psycopg2 expects a sequence of parameters,
        # so a single bare value is wrapped into a one-element tuple
        if not isinstance(user_input, (tuple, list)):
            user_input = (user_input,)
        cur.execute(sqlstatement, user_input)
# display the PostgreSQL database server version
#print('PostgreSQL database version:')
#cur.execute('SELECT version()')
answer = cur.fetchall()
# close the communication with the PostgreSQL
cur.close()
#print("-----ANSWER_START-----")
#print(answer)
#print("------ANSWER_END------")
return answer
except (Exception, psycopg2.DatabaseError) as error:
logging.error(error)
raise ruebDatabaseError(error)
finally:
if conn is not None:
conn.close()
#logging.debug('dbfetchall: Database connection closed.')
def dbcommit(sqlstatement, user_input):
""" Connect to the PostgreSQL database server """
    # example: dbcommit('UPDATE users SET displayname=%s WHERE id=%s', ('krippix', 1))
conn = None
try:
# read connection parameters
params = config.databaseconfig()
# connect to the PostgreSQL server
#logging.debug('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
        # execute a statement; psycopg2 expects a sequence of parameters,
        # so a single bare value is wrapped into a one-element tuple
        if not isinstance(user_input, (tuple, list)):
            user_input = (user_input,)
        cur.execute(sqlstatement, user_input)
        # commit the transaction (commit() itself returns None)
        conn.commit()
        # close the communication with the PostgreSQL
        cur.close()
        return None
except (Exception, psycopg2.DatabaseError) as error:
logging.error(error)
raise ruebDatabaseError(error)
finally:
if conn is not None:
conn.close()
#logging.debug('dbcommit: Database connection closed.')
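# Minimal usage sketch of the three helpers above with parameterised SQL.
# The table and column names are hypothetical and only illustrate the
# (statement, parameters) calling convention.
def _example_usage():
    one_row = dbrequest('SELECT id FROM users WHERE displayname = %s', 'krippix')
    all_rows = dbfetchall('SELECT id, displayname FROM users WHERE active = %s', (True,))
    dbcommit('UPDATE users SET displayname = %s WHERE id = %s', ('krippix', 1))
    return one_row, all_rows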
| 28.393443
| 80
| 0.593245
|
c7264c2476d990625a5d1c402c88ad3c9578b312
| 7,751
|
py
|
Python
|
graphwar/utils/progbar.py
|
EdisonLeeeee/GraphWar
|
78fc9bbc0e086211ca94c26a78278f41abe97f3c
|
[
"MIT"
] | 10
|
2021-11-15T01:29:04.000Z
|
2022-03-06T06:01:13.000Z
|
graphwar/utils/progbar.py
|
EdisonLeeeee/GraphWar
|
78fc9bbc0e086211ca94c26a78278f41abe97f3c
|
[
"MIT"
] | null | null | null |
graphwar/utils/progbar.py
|
EdisonLeeeee/GraphWar
|
78fc9bbc0e086211ca94c26a78278f41abe97f3c
|
[
"MIT"
] | 1
|
2022-03-28T00:38:20.000Z
|
2022-03-28T00:38:20.000Z
|
from typing import Optional, Union, Tuple, List
import os
import sys
import time
from numbers import Number
import numpy as np
class Progbar:
"""A progress bar for display.
Parameters
----------
target : int
total number of steps expected.
width : int, optional
progress bar width on screen, by default 30
verbose : int, optional
verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose), by default 1
interval : float, optional
minimum visual progress update interval (in seconds), by default 0.05
unit_name : str, optional
display name for step counts (usually "step" or "sample"), by default 'step'
Example
-------
>>> from graphwar.utils import Progbar
>>> pbar = Progbar(5)
>>> for i in range(5):
... pbar.add(1, msg=f'current number {i}')
5/5 [==============================] - Total: 3.22ms - 643us/step- current number 4
>>> pbar = Progbar(5)
>>> for i in range(5):
... pbar.update(i+1, msg=f'current number {i}')
5/5 [==============================] - Total: 3.22ms - 643us/step- current number 4
"""
def __init__(self,
target: int,
width: int = 30,
verbose: int = 1,
interval: float = 0.05,
unit_name: str = 'step'):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules or
'PYCHARM_HOSTED' in os.environ)
self._total_width = 0
self._seen_so_far = 0
self._start = time.perf_counter()
self._last_update = 0
def update(self, current: int, msg: Optional[Union[str, List, Tuple]] = None,
finalize: Optional[bool] = None):
"""Updates the progress bar using current value.
Parameters
----------
current : int
index of current step
msg : Optional[Union[str, List, Tuple]], optional
:obj:`(name, value_for_last_step)` or string messages, by default None
finalize : Optional[bool], optional
whether this is the last update for the progress bar. If
:obj:`None`, defaults to :obj:`current >= self.target`, by default None
Raises
------
ValueError
invalid message :obj:`msg` for progress bar.
"""
if not self.verbose:
return
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
msg = msg or {}
if isinstance(msg, str):
message = ' - ' + msg
elif isinstance(msg, (dict, list, tuple)):
message = ''
if isinstance(msg, dict):
msg = msg.items()
else:
assert len(msg[0]) == 2
for k, v in msg:
message += ' - %s:' % k
if v is None:
message += ' None'
elif isinstance(v, str):
message += ' ' + v
else:
message += ' ' + self.format_num(v)
else:
raise ValueError(msg)
message = message.strip()
self._seen_so_far = current
now = time.perf_counter()
delta = now - self._start
if delta >= 1:
delta = ' %.2fs' % delta
elif delta >= 1e-3:
delta = ' %.2fms' % (delta * 1e3)
else:
delta = ' %.2fus' % (delta * 1e6)
info = ' - Total:%s' % delta
if self.verbose == 1:
if now - self._last_update < self.interval and not finalize:
return
info += ' -'
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ('%' + str(numdigits) +
'd/%d [') % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is None or finalize:
if time_per_unit >= 1 or time_per_unit == 0:
info += ' %ds/%s' % (time_per_unit, self.unit_name)
elif time_per_unit >= 1e-3:
info += ' %dms/%s' % (time_per_unit * 1e3, self.unit_name)
else:
info += ' %dus/%s' % (time_per_unit * 1e6, self.unit_name)
else:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
info += message
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if finalize:
info += '\n'
sys.stdout.write(f'{bar}{info}')
sys.stdout.flush()
elif self.verbose == 2:
if finalize:
numdigits = int(np.log10(self.target)) + 1
count = ('%' + str(numdigits) +
'd/%d') % (current, self.target)
info = count + info
info += message
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n: int, msg: Optional[Union[str, List, Tuple]] = None):
"""Add :obj:`n` steps to the progress bar.
Parameters
----------
n : int
number of steps to add to the progress bar
msg : Optional[Union[str, List, Tuple]], optional
:obj:`(name, value_for_last_step)` or string messages, by default None
"""
self.update(self._seen_so_far + n, msg)
@staticmethod
def format_num(n: int) -> str:
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
a Number.
Returns
-------
out : str
Formatted number.
"""
assert isinstance(n, Number), f'{n} is not a Number.'
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
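# Illustrative sketch (not part of the original class): the ".3g" rule used by
# format_num above picks whichever rendering is shorter; values are examples only.
if __name__ == '__main__':
    for _n in (3, 0.000123456, 1234567, 3.14159):
        _f = '{0:.3g}'.format(_n).replace('+0', '+').replace('-0', '-')
        print(_f if len(_f) < len(str(_n)) else str(_n))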
| 32.704641
| 87
| 0.464585
|
ffb701392ececfec896c9e9fcdc1b120e82ac65f
| 4,049
|
py
|
Python
|
babi/babi_parse.py
|
cguptac/blog
|
c03f605ed1c7e750963ad49dfcb43f0d3cd3f6c0
|
[
"MIT"
] | null | null | null |
babi/babi_parse.py
|
cguptac/blog
|
c03f605ed1c7e750963ad49dfcb43f0d3cd3f6c0
|
[
"MIT"
] | null | null | null |
babi/babi_parse.py
|
cguptac/blog
|
c03f605ed1c7e750963ad49dfcb43f0d3cd3f6c0
|
[
"MIT"
] | null | null | null |
from functools import reduce
import re
import tarfile
from collections import namedtuple
import itertools
import numpy as np
# vectorize_stories below needs numpy and pad_sequences; the keras source for
# pad_sequences is an assumption, since the original file does not import it.
from keras.preprocessing.sequence import pad_sequences
# story, question, answer, relevant
Sqar = namedtuple('StoryQuestionAnswerRelevant', ['data', 'question', 'answer', 'relevant'])
# story, question, answer
Sqa = namedtuple('StoryQuestionAnswer', ['data', 'question', 'answer'])
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
    return [x.strip() for x in re.split(r'(\W+)?', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true,
only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
supporting = [int(_) for _ in supporting.split()]
if only_supporting:
# Only select the related substory
substory = [story[i - 1] for i in supporting]
                supporting = None
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a, supporting))
story.append(q)
else:
sent = tokenize(line)
story.append(sent)
return data
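# Illustrative sketch (lines below are made up, not from a real bAbI task file):
# given input such as
#   1 Mary moved to the bathroom.
#   2 John went to the hallway.
#   3 Where is Mary?<TAB>bathroom<TAB>1
# parse_stories returns one (substory, question, answer, supporting) tuple per
# question line, e.g. answer == 'bathroom' and supporting == [1].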
def get_stories(f, max_length=None, **kwargs):
    '''Given a file object, read the file and retrieve the stories.
    If flatten_sentences is true, each story's sentences are merged
    into a single token list. If max_length is supplied, any stories
    longer than max_length (tokens when flattened, sentences otherwise)
    are discarded.
    '''
only_supporting = kwargs.get('only_supporting', False)
flatten_sentences = kwargs.get('flatten_sentences', False)
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
if flatten_sentences:
data = [Sqa(flatten(story), q, answer) \
for story, q, answer, supporting in data if not max_length or len(flatten(story)) < max_length]
else:
data = [Sqar(story, q, answer, supporting) \
for story, q, answer, supporting in data if not max_length or len(story) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
xs = []
xqs = []
ys = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
# let's not forget that index 0 is reserved
y = np.zeros(len(word_idx) + 1)
y[word_idx[answer]] = 1
xs.append(x)
xqs.append(xq)
ys.append(y)
return pad_sequences(xs, maxlen=story_maxlen), pad_sequences(xqs, maxlen=query_maxlen), np.array(ys)
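# Illustrative sketch: with word_idx = {'Mary': 1, 'bathroom': 2, ...} each
# story and query becomes a padded sequence of word indices, and each answer
# becomes a one-hot vector of length len(word_idx) + 1 (index 0 is reserved).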
def printshape(layer, should_print=True):
if should_print:
print(layer.shape)
# get all the files with a given token, train or test
def extract_with_token(tar, token, **kwargs):
flatten_tasks = kwargs.get('flatten_tasks', True)
files = []
flat_file = []
task_names = []
for member in tar.getmembers():
if token in member.name:
tar.extract(member, './')
with open(member.name) as infile:
new_stories = get_stories(infile, **kwargs)
if flatten_tasks:
for story in new_stories:
flat_file.append(story)
else:
files.append(new_stories)
task_names.append([member.name])
if flatten_tasks:
return flat_file
else:
return files, task_names
def flatten_nested(nested_list):
return list(itertools.chain.from_iterable(nested_list))
| 33.46281
| 111
| 0.602618
|
1ca921cfb6fb177125a51ecd463164670c4c647a
| 1,552
|
py
|
Python
|
integration/test_dev.py
|
nicolapaoli/nephos
|
0871c355a6efbdaa0beb1c076543d285d177b5a4
|
[
"Apache-2.0"
] | null | null | null |
integration/test_dev.py
|
nicolapaoli/nephos
|
0871c355a6efbdaa0beb1c076543d285d177b5a4
|
[
"Apache-2.0"
] | 174
|
2019-06-24T10:56:43.000Z
|
2021-08-02T05:38:57.000Z
|
integration/test_dev.py
|
nicolapaoli/nephos
|
0871c355a6efbdaa0beb1c076543d285d177b5a4
|
[
"Apache-2.0"
] | 1
|
2019-05-02T11:43:54.000Z
|
2019-05-02T11:43:54.000Z
|
import os
from nephos.fabric.settings import load_config, check_cluster
from nephos.helpers.misc import execute
from nephos.runners import runner_fabric
CURRENT_PATH = os.path.abspath(os.path.split(__file__)[0])
class TestIntegrationDev:
# We will check cluster and flatly refuse to do integration testing unless on 'minikube'
CONTEXT = "minikube"
CONFIG = os.path.join(CURRENT_PATH, "..", "examples", "dev", "nephos_config.yaml")
def test_integration_dev(self):
# Get options
opts = load_config(self.CONFIG)
# TODO: There should be a more elegant way of obtaining all the releases
releases = (
[key for key in opts["cas"].keys()]
+ [key + "-pg" for key in opts["cas"].keys()]
+ opts["orderers"]["names"]
+ [("cdb-" + key) for key in opts["peers"]["names"]]
+ [key for key in opts["peers"]["names"]]
)
# Run Fabric script
check_cluster(
self.CONTEXT
) # Dangerous operation, recheck we have not shifted context
runner_fabric(opts)
# Delete all deployments from Helm
check_cluster(
self.CONTEXT
) # Dangerous operation, recheck we have not shifted context
execute(f"helm delete --purge {' '.join(releases)}")
# Delete the namespaces
check_cluster(
self.CONTEXT
) # Dangerous operation, recheck we have not shifted context
execute("kubectl delete ns orderers peers".format(" ".join(releases)))
| 34.488889
| 92
| 0.623711
|
b1a83c6ee00a4d941f84ba11569678597fb89472
| 8,900
|
py
|
Python
|
DataProcessor/pruning_heuristics.py
|
Milozms/feedforward-RE
|
a0415b6b835287d7257936c7cbb03abb467a17a8
|
[
"MIT"
] | 1
|
2019-08-25T00:44:27.000Z
|
2019-08-25T00:44:27.000Z
|
DataProcessor/pruning_heuristics.py
|
cherry979988/feedforward-RE
|
546a608a8cb5b35c475e577995df70a89affa15e
|
[
"MIT"
] | null | null | null |
DataProcessor/pruning_heuristics.py
|
cherry979988/feedforward-RE
|
546a608a8cb5b35c475e577995df70a89affa15e
|
[
"MIT"
] | null | null | null |
__author__ = 'wenqihe'
import os
import operator
import sys
from collections import defaultdict
reload(sys)
sys.setdefaultencoding('utf8')
class PruneStrategy:
def __init__(self, strategy):
self._strategy = strategy
self.pruner = self.no_prune
def no_prune(self, fileid, is_ground, labels):
new_labels = set(labels)
return list(new_labels)
def prune(indir, outdir, strategy, feature_number, type_number, neg_label_weight, isRelationMention, emDir):
prune_strategy = PruneStrategy(strategy=strategy)
type_file = open((os.path.join(indir+'/type.txt')), 'r')
negLabelIndex = -1
for line in type_file:
seg = line.strip('\r\n').split('\t')
if seg[0] == "None":
negLabelIndex = int(seg[1])
print "neg label : ", negLabelIndex
break
mids = {}
ground_truth = set()
count = 0
train_y = os.path.join(indir+'/train_y.txt')
train_x = os.path.join(indir+'/train_x_new.txt')
qa_x = os.path.join(indir+'/qa_x_new.txt')
test_x = os.path.join(indir+'/test_x.txt')
test_y = os.path.join(indir+ '/test_y.txt')
mention_file = os.path.join(outdir+ '/mention.txt')
mention_type = os.path.join(outdir+ '/mention_type.txt')
mention_feature = os.path.join(outdir+ '/mention_feature.txt')
mention_type_test = os.path.join(outdir+'/mention_type_test.txt')
mention_feature_test = os.path.join(outdir+ '/mention_feature_test.txt')
feature_type = os.path.join(outdir+ '/feature_type.txt')
qa_pair = os.path.join(indir+'/qa_pair.txt')
qa_mpair = os.path.join(indir+'/qa_mpair.txt')
mention_file_qa = os.path.join(outdir+ '/mention_qa.txt')
mention_feature_qa = os.path.join(outdir+ '/mention_feature_qa.txt')
feature_feature_qa = os.path.join(outdir+ '/feature_feature_qa.txt')
mention_question = os.path.join(indir+'/mention_question.txt')
mention_pairs = os.path.join(indir+'/mention_pairs_qa.txt')
# generate mention_type, and mention_feature for the training & qa corpus
with open(train_x) as fx, open(train_y) as fy, open(test_y) as ft, \
open(mention_type,'w') as gt, open(mention_feature,'w') as gf:
for line in ft:
seg = line.strip('\r\n').split('\t')
ground_truth.add(seg[0])
# generate mention_type and mention_feature
for line in fy:
line2 = fx.readline()
seg = line.strip('\r\n').split('\t')
seg_split = seg[0].split('_')
fileid = '_'.join(seg_split[:-3])
labels = [int(x) for x in seg[1].split(',')]
new_labels = prune_strategy.pruner(fileid=fileid, is_ground=(seg[0] in ground_truth), labels=labels)
if new_labels is not None:
seg2 = line2.strip('\r\n').split('\t')
if len(seg2) != 2:
print seg2
try:
features = seg2[1].split(',')
except:
features = [] #may have empty features after feature filtering
if seg[0] in mids:
continue
for l in new_labels:
if l == negLabelIndex: # discount weight for None label (index is 1)
gt.write(str(count)+'\t'+str(l)+'\t' + str(neg_label_weight) + '\n')
else:
gt.write(str(count)+'\t'+str(l)+'\t1\n')
for f in features:
gf.write(str(count)+'\t'+f+'\t1\n')
mids[seg[0]] = count
count += 1
if count%200000==0:
print count
print count
print 'start qa'
count_qa = 0
mids_qa = {}
with open(qa_x) as fx, open(mention_feature_qa, 'w') as gmf:
for line in fx:
seg = line.strip('\r\n').split('\t')
if len(seg) != 2:
print seg
try:
features = seg[1].split(',')
except:
features = [] #may have empty features after feature filtering
if seg[0] in mids_qa:
continue
for f in features:
gmf.write(str(count_qa)+'\t'+f+'\t1\n')
mids_qa[seg[0]] = count_qa
count_qa += 1
if count_qa%200000==0:
print count_qa
print count_qa
# generate mention_type_test, and mention_feature_test for the test corpus
print count
print 'start test'
with open(test_x) as fx, open(test_y) as fy,\
open(mention_type_test,'w') as gt, open(mention_feature_test, 'w') as gf:
# generate mention_type and mention_feature
for line in fy:
line2 = fx.readline()
seg = line.strip('\r\n').split('\t')
try:
labels = [int(x) for x in seg[1].split(',')]
except:
labels = [] ### if it's negative example (no type label), make it a []
seg2 = line2.strip('\r\n').split('\t')
features = seg2[1].split(',')
if seg[0] in mids:
mid = mids[seg[0]]
else:
mid = count
# print line2
mids[seg[0]] = count
count += 1
for l in labels:
gt.write(str(mid)+'\t'+str(l)+'\t1\n')
for f in features:
gf.write(str(mid)+'\t'+f+'\t1\n')
print count
print 'start mention part'
# generate mention.txt
with open(mention_file,'w') as m:
sorted_mentions = sorted(mids.items(), key=operator.itemgetter(1))
for tup in sorted_mentions:
m.write(tup[0]+'\t'+str(tup[1])+'\n')
if isRelationMention:
entity_mention_file = os.path.join(emDir+ '/mention.txt')
triples_file = os.path.join(outdir+ '/triples.txt')
with open(entity_mention_file, 'r') as emFile, open(triples_file, 'w') as triplesFile:
emIdByString ={}
for line in emFile.readlines():
seg = line.strip('\r\n').split('\t')
emIdByString[seg[0]] = seg[1]
for tup in sorted_mentions:
seg = tup[0].split('_')
em1id = emIdByString['_'.join(seg[:-2])]
em2id = emIdByString['_'.join(seg[:2]+seg[-2:])]
rmid = tup[1]
triplesFile.write(em1id+'\t'+em2id+'\t'+str(rmid)+'\n')
print 'start mention_qa part'
# generate mention.txt
with open(mention_file_qa,'w') as m:
sorted_mentions_qa = sorted(mids_qa.items(), key=operator.itemgetter(1))
for tup in sorted_mentions_qa:
m.write(tup[0]+'\t'+str(tup[1])+'\n')
print 'start feature_type part'
with open(mention_feature) as f1, open(mention_type) as f2,\
open(feature_type,'w') as g:
fm = defaultdict(set)
tm = defaultdict(set)
for line in f1:
seg = line.strip('\r\n').split('\t')
i = int(seg[0])
j = int(seg[1])
fm[j].add(i)
for line in f2:
seg = line.strip('\r\n').split('\t')
i = int(seg[0])
j = int(seg[1])
tm[j].add(i)
for i in xrange(feature_number):
for j in xrange(type_number):
if j == negLabelIndex: ### discount weight for None label "1"
temp = len(fm[i]&tm[j]) * neg_label_weight
else:
temp = len(fm[i]&tm[j])
if temp > 0:
g.write(str(i)+'\t'+str(j)+'\t'+str(temp)+'\n')
print 'start feature_feature_qa part'
f_pairs2count = defaultdict(int)
with open(qa_x) as f1, open(feature_feature_qa,'w') as g:
ct = 0
for line in f1:
ct += 1
if ct%1000 == 0:
print(ct)
seg = line.strip('\r\n').split('\t')
features = seg[1].split(',')
for i in range(len(features)):
for j in range(i+1, len(features)):
f1 = features[i]
f2 = features[j]
f_pairs2count[frozenset([f1,f2])] += 1
for f_pair in f_pairs2count:
f1 = list(f_pair)[0]
f2 = list(f_pair)[1]
g.write(str(f1)+'\t'+str(f2)+'\t'+str(f_pairs2count[f_pair])+'\n')
print 'start qa relation mention pairs part'
with open(qa_mpair) as fin, open(mention_pairs, 'w') as fout:
for line in fin:
seg = line.strip('\r\n').split('\t')
fout.write((str(mids_qa[seg[0]])+'\t'+str(mids_qa[seg[1]])+'\t'+seg[2]+'\n'))
print 'start qa mention question pairs part'
with open(qa_pair) as fin, open(mention_question, 'w') as fout:
for line in fin:
seg = line.strip('\r\n').split('\t')
fout.write((str(mids_qa[seg[0]])+'\t'+seg[1]+'\t'+seg[2]+'\n'))
| 40.639269
| 112
| 0.53573
|
3cb22b1a6f36292b226cbb31e737f4ce4dd1d20f
| 8,526
|
py
|
Python
|
cfgov/ask_cfpb/views.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
cfgov/ask_cfpb/views.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
cfgov/ask_cfpb/views.py
|
hkeeler/cfgov-refresh
|
33977186a8e9cb972e63cc22baa357d381316aec
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import unicode_literals
import json
from urllib.parse import urljoin
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.defaultfilters import slugify
from haystack.query import SearchQuerySet
from wagtailsharing.models import SharingSite
from wagtailsharing.views import ServeView
from bs4 import BeautifulSoup as bs
from ask_cfpb.models import AnswerPage, AnswerResultsPage, AskSearch
try:
from wagtail.core.models import Site
except ImportError: # pragma: no cover; fallback for Wagtail < 2.0
from wagtail.wagtailcore.models import Site
def annotate_links(answer_text):
"""
Parse and annotate links from answer text.
Return the annotated answer
and an enumerated list of links as footnotes.
"""
try:
_site = Site.objects.get(is_default_site=True)
except Site.DoesNotExist:
raise RuntimeError('no default wagtail site configured')
footnotes = []
soup = bs(answer_text, 'lxml')
links = soup.findAll('a')
index = 1
for link in links:
if not link.get('href'):
continue
footnotes.append(
(index, urljoin(_site.root_url, link.get('href'))))
parent = link.parent
link_location = parent.index(link)
super_tag = soup.new_tag('sup')
super_tag.string = str(index)
parent.insert(link_location + 1, super_tag)
index += 1
return (str(soup), footnotes)
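# Illustrative sketch (markup and URL are placeholders): given answer_text like
#   '<p>See <a href="/ask-cfpb/">Ask CFPB</a>.</p>'
# annotate_links returns the markup with <sup>1</sup> inserted after the link
# and footnotes == [(1, '<default-site-root>/ask-cfpb/')].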
def view_answer(request, slug, language, answer_id):
answer_page = get_object_or_404(
AnswerPage, language=language, answer_base__id=answer_id)
if answer_page.live is False:
raise Http404
if answer_page.redirect_to_page:
new_page = answer_page.redirect_to_page
return redirect(new_page.url, permanent=True)
if "{}-{}-{}".format(slug, language, answer_id) != answer_page.slug:
return redirect(answer_page.url, permanent=True)
# We don't want to call answer_page.serve(request) here because that
# would bypass wagtail-sharing logic that allows for review of draft
# revisions via a sharing site.
try:
sharing_site = SharingSite.find_for_request(request)
except SharingSite.DoesNotExist:
return answer_page.serve(request)
page, args, kwargs = ServeView.route(
sharing_site.site,
request,
request.path
)
return ServeView.serve(page, request, args, kwargs)
def ask_search(request, language='en', as_json=False):
if 'selected_facets' in request.GET:
return redirect_ask_search(request, language=language)
language_map = {
'en': 'ask-cfpb-search-results',
'es': 'respuestas'
}
results_page = get_object_or_404(
AnswerResultsPage,
language=language,
slug=language_map[language]
)
# If there's no query string, don't search
search_term = request.GET.get('q', '')
if not search_term:
results_page.query = ''
results_page.result_query = ''
return results_page.serve(request)
search = AskSearch(search_term=search_term, language=language)
if search.queryset.count() == 0:
search.suggest(request=request)
if as_json:
results = {
'query': search_term,
'result_query': search.search_term,
'suggestion': search.suggestion,
'results': [
{
'question': result.autocomplete,
'url': result.url,
'text': result.text,
'preview': result.preview,
}
for result in search.queryset
]
}
json_results = json.dumps(results)
return HttpResponse(json_results, content_type='application/json')
results_page.query = search_term
results_page.result_query = search.search_term
results_page.suggestion = search.suggestion
results_page.answers = [
(result.url, result.autocomplete, result.preview)
for result in search.queryset
]
return results_page.serve(request)
def ask_autocomplete(request, language='en'):
term = request.GET.get(
'term', '').strip().replace('<', '')
if not term:
return JsonResponse([], safe=False)
sqs = SearchQuerySet().models(AnswerPage)
sqs = sqs.autocomplete(
autocomplete=term,
language=language
)
results = [{'question': result.autocomplete,
'url': result.url}
for result in sqs[:20]]
return JsonResponse(results, safe=False)
def redirect_ask_search(request, language='en'):
"""
Redirect legacy knowledgebase requests built via query strings.
Prior to 2016, Ask CFPB (knowledgebase) built category, audience and
search-tag pages based on query string facets. When Ask was migrated
to Wagtail, we simplified the page structure and left this view
to route legacy requests using the old query string faceting routine.
Knowledgebase used /askcfpb/ (no hyphen) as its base URL node.
If the legacy query string has no 'q' element or a blank one, we return
the current base /ask-cfpb/search/ page.
If the query string has a 'q' query, we'll run that search.
Otherwise, we look for legacy faceting.
We want to catch these search facets, in this order:
- selected_facets=category_exact:
- selected_facets=audience_exact
- selected_facets=tag_exact:
"""
category_facet = 'category_exact:'
audience_facet = 'audience_exact:'
tag_facet = 'tag_exact:'
if request.GET.get('q'):
querystring = request.GET.get('q').strip()
if not querystring:
return redirect('/ask-cfpb/search/', permanent=True)
        return redirect(
            '/ask-cfpb/search/?q={query}'.format(query=querystring),
            permanent=True)
else:
facets = request.GET.getlist('selected_facets')
if not facets or not facets[0]:
return redirect(
'/ask-cfpb/search/', permanent=True)
def redirect_to_category(category, language):
if language == 'es':
return redirect(
'/es/obtener-respuestas/categoria-{category}/'.format(
category=category), permanent=True)
return redirect(
'/ask-cfpb/category-{category}/'.format(
category=category), permanent=True)
def redirect_to_audience(audience):
"""We currently only offer audience pages to English users."""
return redirect(
'/ask-cfpb/audience-{audience}/'.format(
audience=audience), permanent=True)
def redirect_to_tag(tag, language):
"""Handle tags passed with underscore separators."""
if language == 'es':
return redirect(
'/es/obtener-respuestas/buscar-por-etiqueta/{tag}/'.format(
tag=tag), permanent=True)
else:
return redirect(
'/ask-cfpb/search-by-tag/{tag}/'.format(
tag=tag), permanent=True)
# Redirect by facet value, if there is one, starting with category.
# We want to exhaust facets each time, so we need three loops.
# We act only on the first of any facet type found.
# Most search redirects will find a category and return.
for facet in facets:
if category_facet in facet:
category = facet.replace(category_facet, '')
if category:
slug = slugify(category) # handle uppercase and spaces
return redirect_to_category(slug, language)
for facet in facets:
if audience_facet in facet:
audience_raw = facet.replace(audience_facet, '')
if audience_raw:
audience = slugify(audience_raw.replace('+', '-'))
return redirect_to_audience(audience)
for facet in facets:
if tag_facet in facet:
raw_tag = facet.replace(tag_facet, '')
if raw_tag:
tag = raw_tag.replace(
' ', '_').replace(
'%20', '_').replace(
'+', '_')
return redirect_to_tag(tag, language)
raise Http404
| 35.08642
| 79
| 0.618344
|
964f9c82688001b3291f6a531f52c09d5a27295d
| 436
|
py
|
Python
|
tests/test_parser.py
|
curoky/click
|
fb547a6d08d1a063c444ae52a7a4e327dc58f68a
|
[
"BSD-3-Clause"
] | 9,730
|
2016-03-30T16:21:20.000Z
|
2022-03-31T23:29:09.000Z
|
tests/test_parser.py
|
curoky/click
|
fb547a6d08d1a063c444ae52a7a4e327dc58f68a
|
[
"BSD-3-Clause"
] | 1,472
|
2016-03-30T17:11:48.000Z
|
2022-03-31T20:55:39.000Z
|
tests/test_parser.py
|
spanglerco/click
|
c3687bf1d2d5c175ee50dc083fce5b19de152de0
|
[
"BSD-3-Clause"
] | 1,423
|
2016-03-31T08:24:28.000Z
|
2022-03-30T03:04:06.000Z
|
import pytest
from click.parser import split_arg_string
@pytest.mark.parametrize(
("value", "expect"),
[
("cli a b c", ["cli", "a", "b", "c"]),
("cli 'my file", ["cli", "my file"]),
("cli 'my file'", ["cli", "my file"]),
("cli my\\", ["cli", "my"]),
("cli my\\ file", ["cli", "my file"]),
],
)
def test_split_arg_string(value, expect):
assert split_arg_string(value) == expect
| 24.222222
| 46
| 0.516055
|
15616ae1a7ca54e4846bb3c12892f737ed05d729
| 9,334
|
py
|
Python
|
src/Network/deepddg/feature118/regressor/SimpleConv1D_CrossValid_Epoch.py
|
ruiyangsong/mCNN
|
889f182245f919fb9c7a8d97965b11576b01a96c
|
[
"MIT"
] | null | null | null |
src/Network/deepddg/feature118/regressor/SimpleConv1D_CrossValid_Epoch.py
|
ruiyangsong/mCNN
|
889f182245f919fb9c7a8d97965b11576b01a96c
|
[
"MIT"
] | null | null | null |
src/Network/deepddg/feature118/regressor/SimpleConv1D_CrossValid_Epoch.py
|
ruiyangsong/mCNN
|
889f182245f919fb9c7a8d97965b11576b01a96c
|
[
"MIT"
] | null | null | null |
import os, json
import sys
import time
import numpy as np
from sklearn.utils import class_weight
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
from keras import Input, models, layers, optimizers, callbacks
from mCNN.Network.metrics import test_report_reg, pearson_r, rmse
from keras.utils import to_categorical
from matplotlib import pyplot as plt
'''
Train on all of the training data and run a blind test on the independent
test set (no validation set). A fixed epoch count is used; the blind test is
repeated 10 times, and the training data for the k-th run is the k-th train
fold with the k-th validation fold appended.
'''
def data(train_data_pth,test_data_pth, val_data_pth):
## train data
train_data = np.load(train_data_pth)
x_train = train_data['x']
y_train = train_data['y']
ddg_train = train_data['ddg'].reshape(-1)
# class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train.reshape(-1))
# class_weights_dict = dict(enumerate(class_weights))
## valid data
val_data = np.load(val_data_pth)
x_val = val_data['x']
y_val = val_data['y']
ddg_val = val_data['ddg'].reshape(-1)
x_train = np.vstack((x_train,x_val))
y_train = np.vstack((y_train,y_val))
ddg_train = np.hstack((ddg_train,ddg_val))
## test data
test_data = np.load(test_data_pth)
x_test = test_data['x']
y_test = test_data['y']
ddg_test = test_data['ddg'].reshape(-1)
# sort row default is chain, pass
# reshape and one-hot
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# normalization
train_shape = x_train.shape
test_shape = x_test.shape
col_train = train_shape[-1]
col_test = test_shape[-1]
x_train = x_train.reshape((-1, col_train))
x_test = x_test.reshape((-1, col_test))
mean = x_train.mean(axis=0)
std = x_train.std(axis=0)
std[np.argwhere(std == 0)] = 0.01
x_train -= mean
x_train /= std
x_test -= mean
x_test /= std
x_train = x_train.reshape(train_shape)
x_test = x_test.reshape(test_shape)
# reshape
# x_train = x_train.reshape(x_train.shape + (1,))
# x_test = x_test.reshape(x_test.shape + (1,))
return x_train, y_train, ddg_train, x_test, y_test, ddg_test
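# Illustrative usage sketch (paths are placeholders for the fold .npz files):
#   x_train, y_train, ddg_train, x_test, y_test, ddg_test = data(
#       'fold1_train.npz', 'test.npz', 'fold1_valid.npz')
# The train and validation folds are concatenated, and the test features are
# standardized with the mean/std computed from that combined training set.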
def ieee_net(x_train, y_train, ddg_train):
row_num, col_num = x_train.shape[1:3]
verbose = 1
batch_size = 64
epochs = int(sys.argv[1]) #[15, 12, 16, 29, 16, 12, 10, 31, 10, 19]
metrics = ('mae', pearson_r, rmse)
def step_decay(epoch):
# drops as progression proceeds, good for sgd
if epoch > 0.9 * epochs:
lr = 0.00001
elif epoch > 0.75 * epochs:
lr = 0.0001
elif epoch > 0.5 * epochs:
lr = 0.001
else:
lr = 0.01
print('lr: %f' % lr)
return lr
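    # For example, with epochs=20 the schedule above yields lr=0.01 for epochs
    # 0-10, 0.001 for 11-15, 0.0001 for 16-18, and 0.00001 from epoch 19 on.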
lrate = callbacks.LearningRateScheduler(step_decay, verbose=verbose)
my_callbacks = [
lrate
]
network = models.Sequential()
network.add(layers.Conv1D(filters=16, kernel_size=5, activation='relu', input_shape=(row_num, col_num)))
network.add(layers.MaxPooling1D(pool_size=2))
network.add(layers.Conv1D(32, 5, activation='relu'))
network.add(layers.MaxPooling1D(pool_size=2))
network.add(layers.Conv1D(64, 3, activation='relu'))
network.add(layers.MaxPooling1D(pool_size=2))
network.add(layers.Flatten())
network.add(layers.Dense(128, activation='relu'))
network.add(layers.Dropout(0.5))
network.add(layers.Dense(16, activation='relu'))
network.add(layers.Dropout(0.3))
network.add(layers.Dense(1))
# print(network.summary())
# rmsp = optimizers.RMSprop(lr=0.0001, decay=0.1)
rmsp = optimizers.RMSprop(lr=0.0001)
network.compile(optimizer=rmsp,#'rmsprop', # SGD,adam,rmsprop
loss='mse',
                    metrics=list(metrics))  # mae = mean absolute error
result = network.fit(x=x_train,
y=ddg_train,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=my_callbacks,
shuffle=True,
)
return network, result.history
if __name__ == '__main__':
epochs = int(sys.argv[1])
from mCNN.queueGPU import queueGPU
CUDA_rate = '0.2'
## config TF
queueGPU(USER_MEM=3000, INTERVAL=60)
# os.environ['CUDA_VISIBLE_DEVICES'] = CUDA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
if CUDA_rate != 'full':
config = tf.ConfigProto()
if float(CUDA_rate)<0.1:
config.gpu_options.allow_growth = True
else:
config.gpu_options.per_process_gpu_memory_fraction = float(CUDA_rate)
set_session(tf.Session(config=config))
# modeldir = '/dl/sry/mCNN/src/Network/deepddg/regressor/TrySimpleConv1D_CrossValid_%s'%time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime())
modeldir = '/dl/sry/mCNN/src/Network/deepddg/regressor/%s_%s'%(sys.argv[0][:-3]+sys.argv[1], time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime()))
os.makedirs(modeldir, exist_ok=True)
score_dict = {'pearson_coeff':[], 'std':[], 'mae':[]}
train_score_dict = {'pearson_coeff':[], 'std':[], 'mae':[]}
es_train_score_dict = {'pearson_coeff':[], 'std':[], 'mae':[]}
test_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_foldall_test_center_CA_PCA_False_neighbor_120.npz'
for i in range(10):
k_count = i+1
print('--cross validation begin, fold %s is processing.'%k_count)
train_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_train_center_CA_PCA_False_neighbor_120.npz'%k_count
valid_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_valid_center_CA_PCA_False_neighbor_120.npz'%k_count
x_train, y_train, ddg_train, x_test, y_test, ddg_test = data(train_data_pth,test_data_pth,valid_data_pth)
print('x_train: %s'
'\ny_train: %s'
'\nddg_train: %s'
'\nx_test: %s'
'\ny_test: %s'
'\nddg_test: %s'
% (x_train.shape, y_train.shape, ddg_train.shape,
x_test.shape, y_test.shape, ddg_test.shape))
#
# train & test
#
model, history_dict = ieee_net(x_train, y_train, ddg_train)
#
# save model architecture
#
try:
model_json = model.to_json()
with open('%s/fold_%s_model.json'%(modeldir,k_count), 'w') as json_file:
json_file.write(model_json)
except:
print('save model.json to json failed, fold_num: %s' % k_count)
#
# save model weights
#
try:
model.save_weights(filepath='%s/fold_%s_weightsFinal.h5' % (modeldir,k_count))
except:
print('save final model weights failed, fold_num: %s' % k_count)
#
# save training history
#
try:
with open('%s/fold_%s_history.dict'%(modeldir,k_count), 'w') as file:
file.write(str(history_dict))
# with open('%s/fold_%s_history.dict'%(modeldir,k_count), 'r') as file:
# print(eval(file.read()))
except:
print('save history_dict failed, fold_num: %s' % k_count)
#
# Load model
#
with open('%s/fold_%s_model.json'%(modeldir,k_count), 'r') as json_file:
loaded_model_json = json_file.read()
loaded_model = models.model_from_json(loaded_model_json) # keras.models.model_from_yaml(yaml_string)
loaded_model.load_weights(filepath='%s/fold_%s_weightsFinal.h5' % (modeldir,k_count))
#
# Test model
#
# pearson_coeff, std, mae = test_report_reg(model, x_test, ddg_test)
pearson_coeff, std, mae = test_report_reg(loaded_model, x_test, ddg_test)
print('\n----------Predict:'
'\npearson_coeff: %s, std: %s, mae: %s'
% (pearson_coeff, std, mae))
score_dict['pearson_coeff'].append(pearson_coeff)
score_dict['std'].append(std)
score_dict['mae'].append(mae)
train_score_dict['pearson_coeff'].append(history_dict['pearson_r'][-1])
train_score_dict['std'].append(history_dict['rmse'][-1])
train_score_dict['mae'].append(history_dict['mean_absolute_error'][-1])
k_count += 1
#
# save score dict
#
try:
with open('%s/fold_._score.dict' % modeldir, 'w') as file:
file.write(str(score_dict))
except:
print('save score dict failed')
#
# save AVG score
#
try:
with open('%s/fold_.avg_score_train_test.txt' % modeldir, 'w') as file:
file.writelines('----------train AVG results\n')
for key in score_dict.keys():
file.writelines('*avg(%s): %s\n'%(key,np.mean(train_score_dict[key])))
file.writelines('----------test AVG results\n')
for key in score_dict.keys():
file.writelines('*avg(%s): %s\n'%(key,np.mean(score_dict[key])))
except:
print('save AVG score failed')
print('\nAVG results','-'*10)
for key in score_dict.keys():
print('*avg(%s): %s'%(key,np.mean(score_dict[key])))
| 37.789474
| 150
| 0.617206
|
2887e7c79d3d17eba7db17112dc17c6119cf7791
| 3,073
|
py
|
Python
|
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams_service.py
|
mkozinenko/ambari
|
9cfe9559420a1f4af89a2d645af84b1ab20d6737
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams_service.py
|
mkozinenko/ambari
|
9cfe9559420a1f4af89a2d645af84b1ab20d6737
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams_service.py
|
mkozinenko/ambari
|
9cfe9559420a1f4af89a2d645af84b1ab20d6737
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from hbase_service import hbase_service
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def ams_service(name, action):
import params
if name == 'collector':
Service(params.ams_embedded_hbase_win_service_name, action=action)
Service(params.ams_collector_win_service_name, action=action)
elif name == 'monitor':
Service(params.ams_monitor_win_service_name, action=action)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def ams_service(name, action):
import params
if name == 'collector':
cmd = format("{ams_collector_script} --config {ams_collector_conf_dir}")
pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
#no_op_test should be much more complex to work with cumulative status of collector
#removing as startup script handle it also
#no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
if params.is_hbase_distributed:
hbase_service('zookeeper', action=action)
hbase_service('master', action=action)
hbase_service('regionserver', action=action)
cmd = format("{cmd} --distributed")
if action == 'start':
if not params.hbase_tmp_dir.startswith('hdfs'):
Execute(format('{sudo} rm -f {hbase_tmp_dir}/*.tmp')
)
daemon_cmd = format("{cmd} start")
Execute(daemon_cmd,
user=params.ams_user
)
pass
elif action == 'stop':
daemon_cmd = format("{cmd} stop")
Execute(daemon_cmd,
user=params.ams_user
)
pass
pass
elif name == 'monitor':
cmd = format("{ams_monitor_script} --config {ams_monitor_conf_dir}")
pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
if action == 'start':
daemon_cmd = format("{cmd} start")
Execute(daemon_cmd,
user=params.ams_user
)
pass
elif action == 'stop':
daemon_cmd = format("{cmd} stop")
Execute(daemon_cmd,
user=params.ams_user
)
pass
pass
pass
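# Illustrative usage sketch (component names and actions as handled above):
#   ams_service('collector', 'start')
#   ams_service('monitor', 'stop')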
| 33.043011
| 96
| 0.703222
|
a270fc5c4f1bff966bb63d218db5f3a59df094b0
| 53,262
|
py
|
Python
|
qiskit_ibm/experiment/ibm_experiment_service.py
|
mriedem/qiskit-ibm
|
be7056e9f59098cb3097d9bdcf6e9f2bbc006455
|
[
"Apache-2.0"
] | null | null | null |
qiskit_ibm/experiment/ibm_experiment_service.py
|
mriedem/qiskit-ibm
|
be7056e9f59098cb3097d9bdcf6e9f2bbc006455
|
[
"Apache-2.0"
] | null | null | null |
qiskit_ibm/experiment/ibm_experiment_service.py
|
mriedem/qiskit-ibm
|
be7056e9f59098cb3097d9bdcf6e9f2bbc006455
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""IBM Quantum experiment service."""
import logging
import json
import copy
from typing import Optional, List, Dict, Union, Tuple, Any, Type
from datetime import datetime
from collections import defaultdict
from qiskit.providers.exceptions import QiskitBackendNotFoundError
from qiskit_ibm import ibm_provider # pylint: disable=unused-import
from .constants import (ExperimentShareLevel, ResultQuality,
RESULT_QUALITY_FROM_API, RESULT_QUALITY_TO_API)
from .utils import map_api_error
from .device_component import DeviceComponent
from ..utils.converters import local_to_utc_str, utc_to_local
from ..api.clients.experiment import ExperimentClient
from ..api.exceptions import RequestsApiError
from ..ibm_backend import IBMRetiredBackend
from ..exceptions import IBMApiError
from ..credentials import store_preferences
logger = logging.getLogger(__name__)
class IBMExperimentService:
"""Provides experiment related services.
This class is the main interface to invoke IBM Quantum
experiment service, which allows you to create, delete, update, query, and
retrieve experiments, experiment figures, and analysis results. The
``experiment`` attribute of
:class:`~qiskit_ibm.ibm_provider.IBMProvider` is an
instance of this class, and the main syntax for using the service is
``provider.experiment.<action>``. For example::
from qiskit_ibm import IBMProvider
provider = IBMProvider()
# Retrieve all experiments.
experiments = provider.experiment.experiments()
# Retrieve experiments with filtering.
experiment_filtered = provider.experiment.experiments(backend_name='ibmq_athens')
# Retrieve a specific experiment using its ID.
experiment = provider.experiment.experiment(EXPERIMENT_ID)
# Upload a new experiment.
new_experiment_id = provider.experiment.create_experiment(
experiment_type="T1",
backend_name="ibmq_athens",
metadata={"qubits": 5}
)
# Update an experiment.
provider.experiment.update_experiment(
experiment_id=EXPERIMENT_ID,
share_level="Group"
)
# Delete an experiment.
provider.experiment.delete_experiment(EXPERIMENT_ID)
Similar syntax applies to analysis results and experiment figures.
"""
_default_preferences = {"auto_save": False}
def __init__(
self,
provider: 'ibm_provider.IBMProvider'
) -> None:
"""IBMExperimentService constructor.
Args:
provider: IBM Quantum account provider.
"""
super().__init__()
self._provider = provider
self._api_client = ExperimentClient(provider.credentials)
self._preferences = copy.deepcopy(self._default_preferences)
self._preferences.update(provider.credentials.preferences.get('experiments', {}))
def backends(self) -> List[Dict]:
"""Return a list of backends that can be used for experiments.
Returns:
A list of backends.
"""
return self._api_client.experiment_devices()
def create_experiment(
self,
experiment_type: str,
backend_name: str,
metadata: Optional[Dict] = None,
experiment_id: Optional[str] = None,
parent_id: Optional[str] = None,
job_ids: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
notes: Optional[str] = None,
share_level: Optional[Union[str, ExperimentShareLevel]] = None,
start_datetime: Optional[Union[str, datetime]] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any
) -> str:
"""Create a new experiment in the database.
Args:
experiment_type: Experiment type.
backend_name: Name of the backend the experiment ran on.
metadata: Experiment metadata.
experiment_id: Experiment ID. It must be in the ``uuid4`` format.
One will be generated if not supplied.
parent_id: The experiment ID of the parent experiment.
The parent experiment must exist, must be on the same backend as the child,
and an experiment cannot be its own parent.
job_ids: IDs of experiment jobs.
tags: Tags to be associated with the experiment.
notes: Freeform notes about the experiment.
share_level: The level at which the experiment is shared. This determines who can
view the experiment (but not update it). This defaults to "private"
for new experiments. Possible values include:
- private: The experiment is only visible to its owner (default)
- project: The experiment is shared within its project
- group: The experiment is shared within its group
- hub: The experiment is shared within its hub
- public: The experiment is shared publicly regardless of provider
start_datetime: Timestamp when the experiment started, in local time zone.
json_encoder: Custom JSON encoder to use to encode the experiment.
kwargs: Additional experiment attributes that are not supported and will be ignored.
Returns:
Experiment ID.
Raises:
IBMExperimentEntryExists: If the experiment already exits.
IBMApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if kwargs:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
kwargs.keys())
data = {
'type': experiment_type,
'device_name': backend_name,
'hub_id': self._provider.credentials.hub,
'group_id': self._provider.credentials.group,
'project_id': self._provider.credentials.project
}
data.update(self._experiment_data_to_api(metadata=metadata,
experiment_id=experiment_id,
parent_id=parent_id,
job_ids=job_ids,
tags=tags,
notes=notes,
share_level=share_level,
start_dt=start_datetime))
with map_api_error(f"Experiment {experiment_id} already exists."):
response_data = self._api_client.experiment_upload(json.dumps(data, cls=json_encoder))
return response_data['uuid']
def update_experiment(
self,
experiment_id: str,
metadata: Optional[Dict] = None,
job_ids: Optional[List[str]] = None,
notes: Optional[str] = None,
tags: Optional[List[str]] = None,
share_level: Optional[Union[str, ExperimentShareLevel]] = None,
end_datetime: Optional[Union[str, datetime]] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any,
) -> None:
"""Update an existing experiment.
Args:
experiment_id: Experiment ID.
metadata: Experiment metadata.
job_ids: IDs of experiment jobs.
notes: Freeform notes about the experiment.
tags: Tags to be associated with the experiment.
share_level: The level at which the experiment is shared. This determines who can
view the experiment (but not update it). This defaults to "private"
for new experiments. Possible values include:
- private: The experiment is only visible to its owner (default)
- project: The experiment is shared within its project
- group: The experiment is shared within its group
- hub: The experiment is shared within its hub
- public: The experiment is shared publicly regardless of provider
end_datetime: Timestamp for when the experiment ended, in local time.
json_encoder: Custom JSON encoder to use to encode the experiment.
kwargs: Additional experiment attributes that are not supported and will be ignored.
Raises:
IBMExperimentEntryNotFound: If the experiment does not exist.
IBMApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if kwargs:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
kwargs.keys())
data = self._experiment_data_to_api(metadata=metadata,
job_ids=job_ids,
tags=tags,
notes=notes,
share_level=share_level,
end_dt=end_datetime)
if not data:
logger.warning("update_experiment() called with nothing to update.")
return
with map_api_error(f"Experiment {experiment_id} not found."):
self._api_client.experiment_update(experiment_id, json.dumps(data, cls=json_encoder))
def _experiment_data_to_api(
self,
metadata: Optional[Dict] = None,
experiment_id: Optional[str] = None,
parent_id: Optional[str] = None,
job_ids: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
notes: Optional[str] = None,
share_level: Optional[Union[str, ExperimentShareLevel]] = None,
start_dt: Optional[Union[str, datetime]] = None,
end_dt: Optional[Union[str, datetime]] = None,
) -> Dict:
"""Convert experiment data to API request data.
Args:
metadata: Experiment metadata.
experiment_id: Experiment ID.
parent_id: Parent experiment ID
job_ids: IDs of experiment jobs.
tags: Tags to be associated with the experiment.
notes: Freeform notes about the experiment.
share_level: The level at which the experiment is shared.
start_dt: Experiment start time.
end_dt: Experiment end time.
Returns:
API request data.
"""
data = {} # type: Dict[str, Any]
if metadata:
data['extra'] = metadata
if experiment_id:
data['uuid'] = experiment_id
if parent_id:
data['parent_experiment_uuid'] = parent_id
if share_level:
if isinstance(share_level, str):
share_level = ExperimentShareLevel(share_level.lower())
data['visibility'] = share_level.value
if tags:
data['tags'] = tags
if job_ids:
data['jobs'] = job_ids
if notes:
data['notes'] = notes
if start_dt:
data['start_time'] = local_to_utc_str(start_dt)
if end_dt:
data['end_time'] = local_to_utc_str(end_dt)
return data
def experiment(
self,
experiment_id: str,
json_decoder: Type[json.JSONDecoder] = json.JSONDecoder
) -> Dict:
"""Retrieve a previously stored experiment.
Args:
experiment_id: Experiment ID.
json_decoder: Custom JSON decoder to use to decode the retrieved experiment.
Returns:
Retrieved experiment data.
Raises:
IBMExperimentEntryNotFound: If the experiment does not exist.
IBMApiError: If the request to the server failed.
"""
with map_api_error(f"Experiment {experiment_id} not found."):
raw_data = self._api_client.experiment_get(experiment_id)
return self._api_to_experiment_data(json.loads(raw_data, cls=json_decoder))
def experiments(
self,
limit: Optional[int] = 10,
json_decoder: Type[json.JSONDecoder] = json.JSONDecoder,
device_components: Optional[List[Union[str, DeviceComponent]]] = None,
device_components_operator: Optional[str] = None,
experiment_type: Optional[str] = None,
experiment_type_operator: Optional[str] = None,
backend_name: Optional[str] = None,
tags: Optional[List[str]] = None,
tags_operator: Optional[str] = "OR",
start_datetime_after: Optional[datetime] = None,
start_datetime_before: Optional[datetime] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
exclude_public: Optional[bool] = False,
public_only: Optional[bool] = False,
exclude_mine: Optional[bool] = False,
mine_only: Optional[bool] = False,
parent_id: Optional[str] = None,
sort_by: Optional[Union[str, List[str]]] = None,
**filters: Any
) -> List[Dict]:
"""Retrieve all experiments, with optional filtering.
By default, results returned are as inclusive as possible. For example,
if you don't specify any filters, all experiments visible to you
are returned. This includes your own experiments as well as
those shared with you, from all providers you have access to
(not just from the provider you used to invoke this experiment service).
Args:
limit: Number of experiments to retrieve. ``None`` indicates no limit.
json_decoder: Custom JSON decoder to use to decode the retrieved experiments.
device_components: Filter by device components.
device_components_operator: Operator used when filtering by device components.
Valid values are ``None`` and "contains":
* If ``None``, an analysis result's device components must match
exactly for it to be included.
* If "contains" is specified, an analysis result's device components
must contain at least the values specified by the `device_components`
filter.
experiment_type: Experiment type used for filtering.
experiment_type_operator: Operator used when filtering by experiment type.
Valid values are ``None`` and "like":
* If ``None`` is specified, an experiment's type value must
match exactly for it to be included.
* If "like" is specified, an experiment's type value must
contain the value specified by `experiment_type`. For example,
``experiment_type="foo", experiment_type_operator="like"`` will match
both ``foo1`` and ``1foo``.
backend_name: Backend name used for filtering.
tags: Filter by tags assigned to experiments.
tags_operator: Logical operator to use when filtering by job tags. Valid
values are "AND" and "OR":
* If "AND" is specified, then an experiment must have all of the tags
specified in `tags` to be included.
* If "OR" is specified, then an experiment only needs to have any
of the tags specified in `tags` to be included.
start_datetime_after: Filter by the given start timestamp, in local time.
This is used to find experiments whose start date/time is after
(greater than or equal to) this local timestamp.
start_datetime_before: Filter by the given start timestamp, in local time.
This is used to find experiments whose start date/time is before
(less than or equal to) this local timestamp.
hub: Filter by hub.
group: Filter by hub and group. `hub` must also be specified if `group` is.
project: Filter by hub, group, and project. `hub` and `group` must also be
specified if `project` is.
exclude_public: If ``True``, experiments with ``share_level=public``
(that is, experiments visible to all users) will not be returned.
Cannot be ``True`` if `public_only` is ``True``.
public_only: If ``True``, only experiments with ``share_level=public``
(that is, experiments visible to all users) will be returned.
Cannot be ``True`` if `exclude_public` is ``True``.
exclude_mine: If ``True``, experiments where I am the owner will not be returned.
Cannot be ``True`` if `mine_only` is ``True``.
mine_only: If ``True``, only experiments where I am the owner will be returned.
Cannot be ``True`` if `exclude_mine` is ``True``.
parent_id: Filter experiments by this parent experiment ID.
sort_by: Specifies how the output should be sorted. This can be a single sorting
option or a list of options. Each option should contain a sort key
and a direction, separated by a semicolon. Valid sort keys are
"start_datetime" and "experiment_type".
Valid directions are "asc" for ascending or "desc" for descending.
For example, ``sort_by=["experiment_type:asc", "start_datetime:desc"]`` will
return an output list that is first sorted by experiment type in
ascending order, then by start datetime by descending order.
By default, experiments are sorted by ``start_datetime``
descending and ``experiment_id`` ascending.
**filters: Additional filtering keywords that are not supported and will be ignored.
Returns:
A list of experiments. Each experiment is a dictionary containing the
retrieved experiment data.
Raises:
ValueError: If an invalid parameter value is specified.
IBMApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if filters:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
filters.keys())
if limit is not None and (not isinstance(limit, int) or limit <= 0): # type: ignore
raise ValueError(f"{limit} is not a valid `limit`, which has to be a positive integer.")
pgh_text = ['project', 'group', 'hub']
pgh_val = [project, group, hub]
for idx, val in enumerate(pgh_val):
if val is not None and None in pgh_val[idx+1:]:
raise ValueError(f"If {pgh_text[idx]} is specified, "
f"{' and '.join(pgh_text[idx+1:])} must also be specified.")
start_time_filters = []
if start_datetime_after:
st_filter = 'ge:{}'.format(local_to_utc_str(start_datetime_after))
start_time_filters.append(st_filter)
if start_datetime_before:
st_filter = 'le:{}'.format(local_to_utc_str(start_datetime_before))
start_time_filters.append(st_filter)
if exclude_public and public_only:
raise ValueError('exclude_public and public_only cannot both be True')
if exclude_mine and mine_only:
raise ValueError('exclude_mine and mine_only cannot both be True')
converted = self._filtering_to_api(
tags=tags,
tags_operator=tags_operator,
sort_by=sort_by,
sort_map={"start_datetime": "start_time",
"experiment_type": "type"},
device_components=device_components,
device_components_operator=device_components_operator,
item_type=experiment_type,
item_type_operator=experiment_type_operator
)
experiments = []
marker = None
while limit is None or limit > 0:
with map_api_error(f"Request failed."):
response = self._api_client.experiments(
limit=limit,
marker=marker,
backend_name=backend_name,
experiment_type=converted["type"],
start_time=start_time_filters,
device_components=converted["device_components"],
tags=converted["tags"],
hub=hub, group=group, project=project,
exclude_public=exclude_public,
public_only=public_only,
exclude_mine=exclude_mine,
mine_only=mine_only,
parent_id=parent_id,
sort_by=converted["sort_by"])
raw_data = json.loads(response, cls=json_decoder)
marker = raw_data.get('marker')
for exp in raw_data['experiments']:
experiments.append(self._api_to_experiment_data(exp))
if limit:
limit -= len(raw_data['experiments'])
if not marker: # No more experiments to return.
break
return experiments
def _api_to_experiment_data(
self,
raw_data: Dict,
) -> Dict:
"""Convert API response to experiment data.
Args:
raw_data: API response
Returns:
Converted experiment data.
"""
backend_name = raw_data['device_name']
try:
backend = self._provider.get_backend(backend_name)
except QiskitBackendNotFoundError:
backend = IBMRetiredBackend.from_name(backend_name=backend_name,
provider=self._provider,
credentials=self._provider.credentials,
api=None)
extra_data: Dict[str, Any] = {}
self._convert_dt(raw_data.get('created_at', None), extra_data, 'creation_datetime')
self._convert_dt(raw_data.get('start_time', None), extra_data, 'start_datetime')
self._convert_dt(raw_data.get('end_time', None), extra_data, 'end_datetime')
self._convert_dt(raw_data.get('updated_at', None), extra_data, 'updated_datetime')
out_dict = {
"experiment_type": raw_data['type'],
"backend": backend,
"experiment_id": raw_data['uuid'],
"parent_id": raw_data.get('parent_experiment_uuid', None),
"tags": raw_data.get("tags", None),
"job_ids": raw_data['jobs'],
"share_level": raw_data.get("visibility", None),
"metadata": raw_data.get("extra", None),
"figure_names": raw_data.get("plot_names", None),
"notes": raw_data.get("notes", ""),
"hub": raw_data.get("hub_id", ""),
"group": raw_data.get("group_id", ""),
"project": raw_data.get("project_id", ""),
"owner": raw_data.get("owner", ""),
**extra_data
}
return out_dict
def _convert_dt(
self,
timestamp: Optional[str],
data: Dict,
field_name: str
) -> None:
"""Convert input timestamp.
Args:
timestamp: Timestamp to be converted.
data: Data used to stored the converted timestamp.
field_name: Name used to store the converted timestamp.
"""
if not timestamp:
return
data[field_name] = utc_to_local(timestamp)
def delete_experiment(self, experiment_id: str) -> None:
"""Delete an experiment.
Args:
experiment_id: Experiment ID.
Note:
This method prompts for confirmation and requires a response before proceeding.
Raises:
IBMApiError: If the request to the server failed.
"""
confirmation = input('\nAre you sure you want to delete the experiment? '
'Results and plots for the experiment will also be deleted. [y/N]: ')
if confirmation not in ('y', 'Y'):
return
try:
self._api_client.experiment_delete(experiment_id)
except RequestsApiError as api_err:
if api_err.status_code == 404:
logger.warning("Experiment %s not found.", experiment_id)
else:
raise IBMApiError(f"Failed to process the request: {api_err}") from None
def create_analysis_result(
self,
experiment_id: str,
result_data: Dict,
result_type: str,
device_components: Optional[Union[List[Union[str, DeviceComponent]],
str, DeviceComponent]] = None,
tags: Optional[List[str]] = None,
quality: Union[ResultQuality, str] = ResultQuality.UNKNOWN,
verified: bool = False,
result_id: Optional[str] = None,
chisq: Optional[float] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any,
) -> str:
"""Create a new analysis result in the database.
Args:
experiment_id: ID of the experiment this result is for.
result_data: Result data to be stored.
result_type: Analysis result type.
device_components: Target device components, such as qubits.
tags: Tags to be associated with the analysis result.
quality: Quality of this analysis.
verified: Whether the result quality has been verified.
result_id: Analysis result ID. It must be in the ``uuid4`` format.
One will be generated if not supplied.
chisq: chi^2 decimal value of the fit.
json_encoder: Custom JSON encoder to use to encode the analysis result.
kwargs: Additional analysis result attributes that are not supported
and will be ignored.
Returns:
Analysis result ID.
Raises:
IBMExperimentEntryExists: If the analysis result already exits.
IBMApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if kwargs:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
kwargs.keys())
components = []
if device_components:
if not isinstance(device_components, list):
device_components = [device_components]
for comp in device_components:
components.append(str(comp))
if isinstance(quality, str):
quality = ResultQuality(quality.upper())
request = self._analysis_result_to_api(
experiment_id=experiment_id,
device_components=components,
data=result_data,
result_type=result_type,
tags=tags,
quality=quality,
verified=verified,
result_id=result_id,
chisq=chisq
)
with map_api_error(f"Analysis result {result_id} already exists."):
response = self._api_client.analysis_result_upload(
json.dumps(request, cls=json_encoder))
return response['uuid']
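    # Illustrative usage sketch (IDs and values below are placeholders):
    #   result_id = provider.experiment.create_analysis_result(
    #       experiment_id=EXPERIMENT_ID,
    #       result_data={"t1": 51.3e-6},
    #       result_type="T1",
    #       device_components=["Q0"],
    #   )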
def update_analysis_result(
self,
result_id: str,
result_data: Optional[Dict] = None,
tags: Optional[List[str]] = None,
        quality: Optional[Union[ResultQuality, str]] = None,
        verified: Optional[bool] = None,
chisq: Optional[float] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any,
) -> None:
"""Update an existing analysis result.
Args:
result_id: Analysis result ID.
result_data: Result data to be stored.
quality: Quality of this analysis.
verified: Whether the result quality has been verified.
tags: Tags to be associated with the analysis result.
chisq: chi^2 decimal value of the fit.
json_encoder: Custom JSON encoder to use to encode the analysis result.
kwargs: Additional analysis result attributes that are not supported
and will be ignored.
Raises:
IBMExperimentEntryNotFound: If the analysis result does not exist.
IBMApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if kwargs:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
kwargs.keys())
if isinstance(quality, str):
quality = ResultQuality(quality.upper())
request = self._analysis_result_to_api(data=result_data,
tags=tags,
quality=quality,
verified=verified,
chisq=chisq)
with map_api_error(f"Analysis result {result_id} not found."):
self._api_client.analysis_result_update(
result_id, json.dumps(request, cls=json_encoder))
def _analysis_result_to_api(
self,
experiment_id: Optional[str] = None,
device_components: Optional[List[str]] = None,
data: Optional[Dict] = None,
result_type: Optional[str] = None,
tags: Optional[List[str]] = None,
quality: Optional[ResultQuality] = None,
verified: Optional[bool] = None,
result_id: Optional[str] = None,
chisq: Optional[float] = None,
) -> Dict:
"""Convert analysis result fields to server format.
Args:
experiment_id: ID of the experiment this result is for.
data: Result data to be stored.
result_type: Analysis result type.
device_components: Target device components, such as qubits.
tags: Tags to be associated with the analysis result.
quality: Quality of this analysis.
verified: Whether the result quality has been verified.
result_id: Analysis result ID. It must be in the ``uuid4`` format.
One will be generated if not supplied.
chisq: chi^2 decimal value of the fit.
Returns:
API request data.
"""
out = {} # type: Dict[str, Any]
if experiment_id:
out["experiment_uuid"] = experiment_id
if device_components:
out["device_components"] = device_components
if data:
out["fit"] = data
if result_type:
out["type"] = result_type
if tags:
out["tags"] = tags
if quality:
out["quality"] = RESULT_QUALITY_TO_API[quality]
if verified is not None:
out["verified"] = verified
if result_id:
out["uuid"] = result_id
if chisq:
out["chisq"] = chisq
return out
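# For example (values are illustrative), _analysis_result_to_api(experiment_id="<uuid>",
# data={"value": 1.0}, result_type="T1", verified=False, chisq=1.3) yields
# {"experiment_uuid": "<uuid>", "fit": {"value": 1.0}, "type": "T1",
# "verified": False, "chisq": 1.3}.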
def analysis_result(
self,
result_id: str,
json_decoder: Type[json.JSONDecoder] = json.JSONDecoder
) -> Dict:
"""Retrieve a previously stored experiment.
Args:
result_id: Analysis result ID.
json_decoder: Custom JSON decoder to use to decode the retrieved analysis result.
Returns:
Retrieved analysis result.
Raises:
IBMExperimentEntryNotFound: If the analysis result does not exist.
IBMApiError: If the request to the server failed.
"""
with map_api_error(f"Analysis result {result_id} not found."):
raw_data = self._api_client.analysis_result_get(result_id)
return self._api_to_analysis_result(json.loads(raw_data, cls=json_decoder))
def analysis_results(
self,
limit: Optional[int] = 10,
json_decoder: Type[json.JSONDecoder] = json.JSONDecoder,
device_components: Optional[List[Union[str, DeviceComponent]]] = None,
device_components_operator: Optional[str] = None,
experiment_id: Optional[str] = None,
result_type: Optional[str] = None,
result_type_operator: Optional[str] = None,
backend_name: Optional[str] = None,
quality: Optional[Union[List[Union[ResultQuality, str]], ResultQuality, str]] = None,
verified: Optional[bool] = None,
tags: Optional[List[str]] = None,
tags_operator: Optional[str] = "OR",
sort_by: Optional[Union[str, List[str]]] = None,
**filters: Any
) -> List[Dict]:
"""Retrieve all analysis results, with optional filtering.
Args:
limit: Number of analysis results to retrieve.
json_decoder: Custom JSON decoder to use to decode the retrieved analysis results.
device_components: Filter by device components.
device_components_operator: Operator used when filtering by device components.
Valid values are ``None`` and "contains":
* If ``None``, an analysis result's device components must match
exactly for it to be included.
* If "contains" is specified, an analysis result's device components
must contain at least the values specified by the `device_components`
filter.
experiment_id: Experiment ID used for filtering.
result_type: Analysis result type used for filtering.
result_type_operator: Operator used when filtering by result type.
Valid values are ``None`` and "like":
* If ``None`` is specified, an analysis result's type value must
match exactly for it to be included.
* If "like" is specified, an analysis result's type value must
contain the value specified by `result_type`. For example,
``result_type="foo", result_type_operator="like"`` will match
both ``foo1`` and ``1foo``.
backend_name: Backend name used for filtering.
quality: Quality value used for filtering. If a list is given, analysis results
whose quality value is in the list will be included.
verified: Indicates whether this result has been verified.
tags: Filter by tags assigned to analysis results. This can be used
with `tags_operator` for granular filtering.
tags_operator: Logical operator to use when filtering by tags. Valid
values are "AND" and "OR":
* If "AND" is specified, then an analysis result must have all of the tags
specified in `tags` to be included.
* If "OR" is specified, then an analysis result only needs to have any
of the tags specified in `tags` to be included.
sort_by: Specifies how the output should be sorted. This can be a single sorting
option or a list of options. Each option should contain a sort key
and a direction. Valid sort keys are "creation_datetime", "device_components",
and "result_type". Valid directions are "asc" for ascending or "desc" for
descending.
For example, ``sort_by=["result_type: asc", "creation_datetime:desc"]`` will
return an output list that is first sorted by result type in
ascending order, then by creation datetime by descending order.
By default, analysis results are sorted by ``creation_datetime``
descending and ``result_id`` ascending.
**filters: Additional filtering keywords that are not supported and will be ignored.
Returns:
A list of analysis results. Each analysis result is a dictionary
containing the retrieved analysis result.
Raises:
ValueError: If an invalid parameter value is specified.
IBMApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if filters:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
filters.keys())
if limit is not None and (not isinstance(limit, int) or limit <= 0): # type: ignore
raise ValueError(f"{limit} is not a valid `limit`, which has to be a positive integer.")
quality = self._quality_filter_to_api(quality)
converted = self._filtering_to_api(
tags=tags,
tags_operator=tags_operator,
sort_by=sort_by,
sort_map={"creation_datetime": "created_at",
"device_components": "device_components",
"result_type": "type"},
device_components=device_components,
device_components_operator=device_components_operator,
item_type=result_type,
item_type_operator=result_type_operator
)
results = []
marker = None
while limit is None or limit > 0:
with map_api_error("Request failed."):
response = self._api_client.analysis_results(
limit=limit,
marker=marker,
backend_name=backend_name,
device_components=converted["device_components"],
experiment_uuid=experiment_id,
result_type=converted["type"],
quality=quality,
verified=verified,
tags=converted["tags"],
sort_by=converted["sort_by"]
)
raw_data = json.loads(response, cls=json_decoder)
marker = raw_data.get('marker')
for result in raw_data['analysis_results']:
results.append(self._api_to_analysis_result(result))
if limit:
limit -= len(raw_data['analysis_results'])
if not marker: # No more experiments to return.
break
return results
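# Illustrative usage sketch (identifiers below are hypothetical): fetch up to 50
# "T1"-like results on component Q0, newest first.
#
#     results = service.analysis_results(
#         limit=50,
#         result_type="T1", result_type_operator="like",
#         device_components=["Q0"], device_components_operator="contains",
#         sort_by="creation_datetime:desc",
#     )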
def _quality_filter_to_api(
self,
quality: Optional[Union[List[Union[ResultQuality, str]], ResultQuality, str]] = None,
) -> Optional[Union[str, List[str]]]:
"""Convert quality filter to server format."""
if not quality:
return None
if not isinstance(quality, list):
quality = [quality]
api_quals = []
for qual in quality:
if isinstance(qual, str):
qual = ResultQuality(qual.upper())
api_qual = RESULT_QUALITY_TO_API[qual]
if api_qual not in api_quals:
api_quals.append(api_qual)
if len(api_quals) == 1:
return api_quals[0]
if len(api_quals) == len(ResultQuality):
return None
return "in:" + ",".join(api_quals)
def _filtering_to_api(
self,
tags: Optional[List[str]] = None,
tags_operator: Optional[str] = "OR",
sort_by: Optional[Union[str, List[str]]] = None,
sort_map: Optional[Dict] = None,
device_components: Optional[List[Union[str, DeviceComponent]]] = None,
device_components_operator: Optional[str] = None,
item_type: Optional[str] = None,
item_type_operator: Optional[str] = None,
) -> Dict:
"""Convert filtering inputs to server format.
Args:
tags: Filtering by tags.
tags_operator: Tags operator.
sort_by: Specifies how the output should be sorted.
sort_map: Sort key to API key mapping.
device_components: Filter by device components.
device_components_operator: Device component operator.
item_type: Item type used for filtering.
item_type_operator: Operator used when filtering by type.
Returns:
A dictionary of mapped filters.
Raises:
ValueError: If an input key is invalid.
"""
tags_filter = None
if tags:
if tags_operator.upper() == 'OR':
tags_filter = 'any:' + ','.join(tags)
elif tags_operator.upper() == 'AND':
tags_filter = 'contains:' + ','.join(tags)
else:
raise ValueError('{} is not a valid `tags_operator`. Valid values are '
'"AND" and "OR".'.format(tags_operator))
sort_list = []
if sort_by:
if not isinstance(sort_by, list):
sort_by = [sort_by]
for sorter in sort_by:
key, direction = sorter.split(":")
key = key.lower()
if key not in sort_map:
raise ValueError(f'"{key}" is not a valid sort key. '
f'Valid sort keys are {sort_map.keys()}')
key = sort_map[key]
if direction not in ["asc", "desc"]:
raise ValueError(f'"{direction}" is not a valid sorting direction.'
f'Valid directions are "asc" and "desc".')
sort_list.append(f"{key}:{direction}")
sort_by = ",".join(sort_list)
if device_components:
device_components = [str(comp) for comp in device_components]
if device_components_operator:
if device_components_operator != "contains":
raise ValueError(f'{device_components_operator} is not a valid '
f'device_components_operator value. Valid values '
f'are ``None`` and "contains"')
device_components = \
"contains:" + ','.join(device_components) # type: ignore
if item_type and item_type_operator:
if item_type_operator != "like":
raise ValueError(f'"{item_type_operator}" is not a valid type operator value. '
f'Valid values are ``None`` and "like".')
item_type = "like:" + item_type
return {"tags": tags_filter,
"sort_by": sort_by,
"device_components": device_components,
"type": item_type}
def _api_to_analysis_result(
self,
raw_data: Dict,
) -> Dict:
"""Map API response to an AnalysisResult instance.
Args:
raw_data: API response data.
Returns:
Converted analysis result data.
"""
extra_data = {}
chisq = raw_data.get('chisq', None)
if chisq:
extra_data['chisq'] = chisq
backend_name = raw_data['device_name']
if backend_name:
extra_data['backend_name'] = backend_name
quality = raw_data.get('quality', None)
if quality:
quality = RESULT_QUALITY_FROM_API[quality]
self._convert_dt(raw_data.get('created_at', None), extra_data, 'creation_datetime')
self._convert_dt(raw_data.get('updated_at', None), extra_data, 'updated_datetime')
out_dict = {
"result_data": raw_data.get('fit', {}),
"result_type": raw_data.get('type', None),
"device_components": raw_data.get('device_components', []),
"experiment_id": raw_data.get('experiment_uuid'),
"result_id": raw_data.get('uuid', None),
"quality": quality,
"verified": raw_data.get('verified', False),
"tags": raw_data.get('tags', []),
"service": self,
**extra_data
}
return out_dict
def delete_analysis_result(
self,
result_id: str
) -> None:
"""Delete an analysis result.
Args:
result_id: Analysis result ID.
Note:
This method prompts for confirmation and requires a response before proceeding.
Raises:
IBMApiError: If the request to the server failed.
"""
confirmation = input('\nAre you sure you want to delete the analysis result? [y/N]: ')
if confirmation not in ('y', 'Y'):
return
try:
self._api_client.analysis_result_delete(result_id)
except RequestsApiError as api_err:
if api_err.status_code == 404:
logger.warning("Analysis result %s not found.", result_id)
else:
raise IBMApiError(f"Failed to process the request: {api_err}") from None
def create_figure(
self,
experiment_id: str,
figure: Union[str, bytes],
figure_name: Optional[str] = None,
sync_upload: bool = True
) -> Tuple[str, int]:
"""Store a new figure in the database.
Note:
Currently only SVG figures are supported.
Args:
experiment_id: ID of the experiment this figure is for.
figure: Name of the figure file or figure data to store.
figure_name: Name of the figure. If ``None``, the figure file name, if
given, or a generated name is used.
sync_upload: If ``True``, the plot will be uploaded synchronously.
Otherwise the upload will be asynchronous.
Returns:
A tuple of the name and size of the saved figure.
Raises:
IBMExperimentEntryExists: If the figure already exists.
IBMApiError: If the request to the server failed.
"""
if figure_name is None:
if isinstance(figure, str):
figure_name = figure
else:
figure_name = "figure_{}.svg".format(datetime.now().isoformat())
if not figure_name.endswith(".svg"):
figure_name += ".svg"
with map_api_error(f"Figure {figure_name} already exists."):
response = self._api_client.experiment_plot_upload(experiment_id, figure, figure_name,
sync_upload=sync_upload)
return response['name'], response['size']
def update_figure(
self,
experiment_id: str,
figure: Union[str, bytes],
figure_name: str,
sync_upload: bool = True
) -> Tuple[str, int]:
"""Update an existing figure.
Args:
experiment_id: Experiment ID.
figure: Name of the figure file or figure data to store.
figure_name: Name of the figure.
sync_upload: If ``True``, the plot will be uploaded synchronously.
Otherwise the upload will be asynchronous.
Returns:
A tuple of the name and size of the saved figure.
Raises:
IBMExperimentEntryNotFound: If the figure does not exist.
IBMApiError: If the request to the server failed.
"""
with map_api_error(f"Figure {figure_name} not found."):
response = self._api_client.experiment_plot_update(experiment_id, figure, figure_name,
sync_upload=sync_upload)
return response['name'], response['size']
def figure(
self,
experiment_id: str,
figure_name: str,
file_name: Optional[str] = None
) -> Union[int, bytes]:
"""Retrieve an existing figure.
Args:
experiment_id: Experiment ID.
figure_name: Name of the figure.
file_name: Name of the local file to save the figure to. If ``None``,
the content of the figure is returned instead.
Returns:
The size of the figure if `file_name` is specified. Otherwise the
content of the figure in bytes.
Raises:
IBMExperimentEntryNotFound: If the figure does not exist.
IBMApiError: If the request to the server failed.
"""
with map_api_error(f"Figure {figure_name} not found."):
data = self._api_client.experiment_plot_get(experiment_id, figure_name)
if file_name:
with open(file_name, 'wb') as file:
num_bytes = file.write(data)
return num_bytes
return data
def delete_figure(
self,
experiment_id: str,
figure_name: str
) -> None:
"""Delete an experiment plot.
Note:
This method prompts for confirmation and requires a response before proceeding.
Args:
experiment_id: Experiment ID.
figure_name: Name of the figure.
Raises:
IBMApiError: If the request to the server failed.
"""
confirmation = input('\nAre you sure you want to delete the experiment plot? [y/N]: ')
if confirmation not in ('y', 'Y'):
return
try:
self._api_client.experiment_plot_delete(experiment_id, figure_name)
except RequestsApiError as api_err:
if api_err.status_code == 404:
logger.warning("Figure %s not found.", figure_name)
else:
raise IBMApiError(f"Failed to process the request: {api_err}") from None
def device_components(
self,
backend_name: Optional[str] = None
) -> Union[Dict[str, List], List]:
"""Return the device components.
Args:
backend_name: Name of the backend whose components are to be retrieved.
Returns:
A list of device components if `backend_name` is specified. Otherwise
a dictionary whose keys are backend names and the values
are lists of device components for the backends.
Raises:
IBMApiError: If the request to the server failed.
"""
with map_api_error(f"No device components found for backend {backend_name}"):
raw_data = self._api_client.device_components(backend_name)
components = defaultdict(list)
for data in raw_data:
components[data['device_name']].append(data['type'])
if backend_name:
return components[backend_name]
return dict(components)
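# For example (backend name is hypothetical), device_components("ibmq_toy") returns a
# plain list such as ["Q0", "Q1", "CX0_1"], whereas device_components() returns a dict
# keyed by backend name, e.g. {"ibmq_toy": ["Q0", "Q1", "CX0_1"], ...}.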
@property
def preferences(self) -> Dict:
"""Return saved experiment preferences.
Note:
These are preferences passed to the applications that use this service
and have no effect on the service itself. It is up to the application,
such as ``qiskit-experiments`` to implement the preferences.
Returns:
Dict: The experiment preferences.
"""
return self._preferences
def save_preferences(self, auto_save: bool = None) -> None:
"""Stores experiment preferences on disk.
Note:
These are preferences passed to the applications that use this service
and have no effect on the service itself.
For example, if ``auto_save`` is set to ``True``, it tells the application,
such as ``qiskit-experiments``, that you prefer changes to be
automatically saved. It is up to the application to implement the preferences.
Args:
auto_save: Automatically save the experiment.
"""
update_cred = False
if auto_save is not None and auto_save != self._preferences["auto_save"]:
self._preferences['auto_save'] = auto_save
update_cred = True
if update_cred:
store_preferences(
{self._provider.credentials.unique_id(): {'experiment': self.preferences}})
| 42.137658
| 100
| 0.583699
|
ec6cc9df8ba677a28396eb799e1864c06222dca4
| 7,573
|
py
|
Python
|
tests/test_MTOColumn.py
|
xguse/table_enforcer
|
f3137839574bf8ea933a14ea16a8acba45e3e0c3
|
[
"MIT"
] | 13
|
2017-11-16T23:24:17.000Z
|
2021-05-28T01:05:31.000Z
|
tests/test_MTOColumn.py
|
xguse/table_enforcer
|
f3137839574bf8ea933a14ea16a8acba45e3e0c3
|
[
"MIT"
] | 1
|
2019-09-26T18:34:21.000Z
|
2021-10-12T17:18:24.000Z
|
tests/test_MTOColumn.py
|
xguse/table_enforcer
|
f3137839574bf8ea933a14ea16a8acba45e3e0c3
|
[
"MIT"
] | 1
|
2017-11-17T17:18:31.000Z
|
2017-11-17T17:18:31.000Z
|
"""Test the unit: test_MTOColumn."""
from collections import OrderedDict
import pytest
from .conftest import demo_good_df, sort_columns
import pandas as pd
import numpy as np
from box import Box
from table_enforcer import Column, CompoundColumn, BaseColumn
from table_enforcer import validate as v
import table_enforcer.errors as e
# helper functions
def is_subset(x, ref_set):
if not isinstance(ref_set, set):
ref_set = set(ref_set)
if not isinstance(x, set):
set_x = set([x])
else:
set_x = x
return set_x.issubset(ref_set)
# Transformation function
def join_as_tuple(df):
cols = Box()
cols.col6_7_8 = df[["col6", "col7", "col8"]].apply(
lambda row: (
row.col6,
row.col7,
row.col8,), axis=1)
new_columns = pd.DataFrame(cols)
return new_columns
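# For example, an input row with col6="DNASeq", col7=None, col8="RNASeq" is collapsed
# into the single tuple ("DNASeq", None, "RNASeq") in the new "col6_7_8" output column.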
# Validators
def col6_valid_values(series):
"""Validator"""
valid = [None, "DNASeq"]
return series.apply(is_subset, ref_set=valid)
def col7_valid_values(series):
"""Validator"""
valid = [None, "Protein Function"]
return series.apply(is_subset, ref_set=valid)
def col8_valid_values(series):
"""Validator"""
valid = [None, "RNASeq"]
return series.apply(is_subset, ref_set=valid)
def col6_7_8_valid_values(series):
"""Validator"""
valid = set(["DNASeq", "Protein Function", "RNASeq"])
return series.apply(is_subset, ref_set=valid)
# Recoders
def translate_col6(series):
"""Recode 0-> None; 1-> 'DNASeq' """
def rcode(x):
mapping = {0: None, 1: "DNASeq"}
return mapping[x]
return series.apply(rcode)
def translate_col7(series):
"""Recode 0-> None; 1-> 'Protein Function' """
def rcode(x):
mapping = {0: None, 1: "Protein Function"}
return mapping[x]
return series.apply(rcode)
def translate_col8(series):
"""Recode 0-> None; 1-> 'RNASeq' """
def rcode(x):
mapping = {0: None, 1: "RNASeq"}
return mapping[x]
return series.apply(rcode)
def setify_drop_nones(series):
"""Convert to sets and drop ``None`` values."""
def drop_nones(x):
x.discard(None)
return x
return series.apply(lambda x: set(x)).apply(drop_nones)
# Defining the Input Columns
@pytest.fixture()
def col6():
return Column(
name='col6',
dtype=(str, type(None)),
unique=False,
validators=[col6_valid_values],
recoders=[translate_col6],
)
@pytest.fixture()
def col7():
return Column(
name='col7',
dtype=(str, type(None)),
unique=False,
validators=[col7_valid_values],
recoders=[translate_col7],
)
@pytest.fixture()
def col8():
return Column(
name='col8',
dtype=(str, type(None)),
unique=False,
validators=[col8_valid_values],
recoders=[translate_col8],
)
# Defining the Output Column
@pytest.fixture()
def col6_7_8():
return Column(
name='col6_7_8',
dtype=set,
unique=False,
validators=[v.funcs.not_null, col6_7_8_valid_values],
recoders=[setify_drop_nones],
)
@pytest.fixture()
def col6_7_8_join(col6, col7, col8, col6_7_8):
return CompoundColumn(input_columns=[col6, col7, col8], output_columns=[col6_7_8], column_transform=join_as_tuple)
@pytest.mark.mtoc
def test_mto_column_init(col6_7_8_join, col6, col7, col8, col6_7_8):
assert col6_7_8_join.input_columns == [col6, col7, col8]
assert col6_7_8_join.output_columns == [col6_7_8]
assert col6_7_8_join.column_transform == join_as_tuple
assert issubclass(col6_7_8_join.__class__, BaseColumn)
assert all([isinstance(c, Column) for c in col6_7_8_join.input_columns])
assert all([isinstance(c, Column) for c in col6_7_8_join.output_columns])
@pytest.mark.mtoc
def test_mto_column_results(col6_7_8_join, demo_good_df, valid_values):
df = demo_good_df
valids = valid_values
assert sort_columns(col6_7_8_join._validate_input(df).reset_index()).equals(sort_columns(valids["validate_input"]))
assert sort_columns(col6_7_8_join._validate_output(df).reset_index()
).equals(sort_columns(valids["validate_output"]))
assert sort_columns(col6_7_8_join.validate(df).reset_index()).equals(sort_columns(valids["validate_all"]))
@pytest.fixture()
def valid_values():
validate_input_json = '''{"validation_type":{"0":"input","1":"input","2":"input","3":"input","4":"input","5":"input","6":"input","7":"input","8":"input","9":"input","10":"input","11":"input"},"column_name":{"0":"col6","1":"col6","2":"col6","3":"col6","4":"col7","5":"col7","6":"col7","7":"col7","8":"col8","9":"col8","10":"col8","11":"col8"},"row":{"0":0,"1":1,"2":2,"3":3,"4":0,"5":1,"6":2,"7":3,"8":0,"9":1,"10":2,"11":3},"col6_valid_values":{"0":false,"1":false,"2":false,"3":false,"4":null,"5":null,"6":null,"7":null,"8":null,"9":null,"10":null,"11":null},"col7_valid_values":{"0":null,"1":null,"2":null,"3":null,"4":false,"5":false,"6":false,"7":false,"8":null,"9":null,"10":null,"11":null},"col8_valid_values":{"0":null,"1":null,"2":null,"3":null,"4":null,"5":null,"6":null,"7":null,"8":false,"9":false,"10":false,"11":false},"dtype":{"0":false,"1":false,"2":false,"3":false,"4":false,"5":false,"6":false,"7":false,"8":false,"9":false,"10":false,"11":false}}'''
validate_output_json = '''{"validation_type":{"0":"output","1":"output","2":"output","3":"output"},"column_name":{"0":"col6_7_8","1":"col6_7_8","2":"col6_7_8","3":"col6_7_8"},"row":{"0":0,"1":1,"2":2,"3":3},"col6_7_8_valid_values":{"0":false,"1":false,"2":false,"3":false},"not_null":{"0":true,"1":true,"2":true,"3":true},"dtype":{"0":false,"1":false,"2":false,"3":false}}'''
validate_all_json = '''{"validation_type":{"0":"input","1":"input","2":"input","3":"input","4":"input","5":"input","6":"input","7":"input","8":"input","9":"input","10":"input","11":"input","12":"output","13":"output","14":"output","15":"output"},"column_name":{"0":"col6","1":"col6","2":"col6","3":"col6","4":"col7","5":"col7","6":"col7","7":"col7","8":"col8","9":"col8","10":"col8","11":"col8","12":"col6_7_8","13":"col6_7_8","14":"col6_7_8","15":"col6_7_8"},"row":{"0":0,"1":1,"2":2,"3":3,"4":0,"5":1,"6":2,"7":3,"8":0,"9":1,"10":2,"11":3,"12":0,"13":1,"14":2,"15":3},"col6_7_8_valid_values":{"0":true,"1":true,"2":true,"3":true,"4":true,"5":true,"6":true,"7":true,"8":true,"9":true,"10":true,"11":true,"12":false,"13":false,"14":false,"15":false},"col6_valid_values":{"0":false,"1":false,"2":false,"3":false,"4":true,"5":true,"6":true,"7":true,"8":true,"9":true,"10":true,"11":true,"12":true,"13":true,"14":true,"15":true},"col7_valid_values":{"0":true,"1":true,"2":true,"3":true,"4":false,"5":false,"6":false,"7":false,"8":true,"9":true,"10":true,"11":true,"12":true,"13":true,"14":true,"15":true},"col8_valid_values":{"0":true,"1":true,"2":true,"3":true,"4":true,"5":true,"6":true,"7":true,"8":false,"9":false,"10":false,"11":false,"12":true,"13":true,"14":true,"15":true},"dtype":{"0":false,"1":false,"2":false,"3":false,"4":false,"5":false,"6":false,"7":false,"8":false,"9":false,"10":false,"11":false,"12":false,"13":false,"14":false,"15":false},"not_null":{"0":true,"1":true,"2":true,"3":true,"4":true,"5":true,"6":true,"7":true,"8":true,"9":true,"10":true,"11":true,"12":true,"13":true,"14":true,"15":true}}'''
vals = {}
vals["validate_input"] = pd.read_json(validate_input_json)
vals["validate_output"] = pd.read_json(validate_output_json)
vals["validate_all"] = pd.read_json(validate_all_json)
return vals
| 39.442708
| 1,624
| 0.6267
|
7bb6b10100e53ca9389f4ce26d11f2acd061d184
| 34
|
py
|
Python
|
aux/biblib/__init__.py
|
marcschulder/BibTexNanny
|
2244dae04580fb49d785604d7f041a5e4ea87648
|
[
"MIT"
] | 5
|
2018-10-01T17:07:44.000Z
|
2020-09-02T05:44:39.000Z
|
aux/biblib/__init__.py
|
marcschulder/BibTexNanny
|
2244dae04580fb49d785604d7f041a5e4ea87648
|
[
"MIT"
] | 10
|
2019-03-24T14:31:25.000Z
|
2019-12-06T17:50:27.000Z
|
aux/biblib/__init__.py
|
marcschulder/BibTexNanny
|
2244dae04580fb49d785604d7f041a5e4ea87648
|
[
"MIT"
] | null | null | null |
from . import bib, algo, messages
| 17
| 33
| 0.735294
|
3d223707681e3d7ed61f0cf5ac4daff654874237
| 1,895
|
py
|
Python
|
python/akg/ops/nn/ascend/relu_ad.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 286
|
2020-06-23T06:40:44.000Z
|
2022-03-30T01:27:49.000Z
|
python/akg/ops/nn/ascend/relu_ad.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 10
|
2020-07-31T03:26:59.000Z
|
2021-12-27T15:00:54.000Z
|
python/akg/ops/nn/ascend/relu_ad.py
|
tianjiashuo/akg
|
a9cbf642063fb1086a93e8bc6be6feb145689817
|
[
"Apache-2.0"
] | 30
|
2020-07-17T01:04:14.000Z
|
2021-12-27T14:05:19.000Z
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: relu_ad"""
import akg
import akg.tvm
import akg.utils as utils
from akg.utils import custom_tiling as ct_util
from akg.ops.nn.ascend.relu import Relu
from akg.dim import DIM
relu_ad_set_dim_map = {
}
def relu_ad_set_dim_func(head, a):
"""set dim info"""
key = []
key.append(tuple(a.shape))
key.append(a.dtype)
hash_key = str(tuple(key))
if hash_key in relu_ad_set_dim_map.keys():
return ct_util.set_dims(relu_ad_set_dim_map[hash_key]), hash_key
return "", hash_key
@ct_util.reg_set_dim_func(relu_ad_set_dim_func)
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor, (str, type(None)))
def ReluAd(head, a, target=utils.CCE):
"""
Compute gradient of relu operator using automatic differentiate.
Args:
head (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
a (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
Returns:
tuple: the gradient tensor (same shape as the input ``a``) and a dict of attributes carrying the dim info.
Supported Platforms:
'Ascend'
"""
dim_info, _ = relu_ad_set_dim_func(head, a)
attrs = {DIM: dim_info}
b = Relu(a)
jacs = list(akg.differentiate(b, [a], head))
return jacs[0], attrs
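# Illustrative usage sketch (not part of the original source; assumes akg.tvm provides
# the TVM-style ``placeholder`` constructor):
#
#     a = akg.tvm.placeholder((8, 16), name="a", dtype="float16")
#     head = akg.tvm.placeholder((8, 16), name="head", dtype="float16")
#     grad, attrs = ReluAd(head, a)   # grad has the same shape and dtype as ``a``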
| 29.609375
| 88
| 0.709763
|
850a1fba1f6e3d1c8741e43eaaf112dca5b3e742
| 1,696
|
py
|
Python
|
sktime/regression/base.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 5,349
|
2019-03-21T14:56:50.000Z
|
2022-03-31T11:25:30.000Z
|
sktime/regression/base.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 1,803
|
2019-03-26T13:33:53.000Z
|
2022-03-31T23:58:10.000Z
|
sktime/regression/base.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 911
|
2019-03-25T01:21:30.000Z
|
2022-03-31T04:45:51.000Z
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements base class for time series regression estimators in sktime."""
__author__ = ["Markus Löning"]
__all__ = ["BaseRegressor"]
from sktime.base import BaseEstimator
class BaseRegressor(BaseEstimator):
"""Base class for regressors, for identification."""
def fit(self, X, y):
"""Fit regressor to training data.
Parameters
----------
X : pd.DataFrame, optional (default=None)
Exogenous data
y : pd.Series, pd.DataFrame, or np.array
Target time series to which to fit the regressor.
Returns
-------
self :
Reference to self.
"""
raise NotImplementedError("abstract method")
def predict(self, X):
"""Predict time series.
Parameters
----------
X : pd.DataFrame, shape=[n_obs, n_vars]
A 2-d dataframe of exogenous variables.
Returns
-------
y_pred : pd.Series
Regression predictions.
"""
raise NotImplementedError("abstract method")
def score(self, X, y):
"""Scores regression against ground truth, R-squared.
Parameters
----------
X : pd.DataFrame, shape=[n_obs, n_vars]
A 2-d dataframe of exogenous variables.
y : pd.Series
Target time series to which to compare the predictions.
Returns
-------
score : float
R-squared score.
"""
from sklearn.metrics import r2_score
return r2_score(y, self.predict(X))
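# Minimal illustrative sketch (not part of sktime): a concrete subclass showing how the
# fit/predict contract above is typically implemented. It memorises the mean of the
# training target and predicts that constant value for every row of X.
import numpy as np
class _MeanRegressor(BaseRegressor):
    """Toy regressor that predicts the training-target mean (illustration only)."""
    def fit(self, X, y):
        # Store the only "parameter" this toy model needs.
        self._mean = float(np.mean(y))
        return self
    def predict(self, X):
        # One constant prediction per row of X.
        return np.full(shape=len(X), fill_value=self._mean)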
| 26.092308
| 76
| 0.569575
|
56c88caeaec314affa268461cdbd32cd8cf4cc7b
| 2,123
|
py
|
Python
|
functionaltests/common/config.py
|
kiall/designate-py3
|
2b135d64bb0ced77327a563e037b270d1e5ca308
|
[
"Apache-2.0"
] | null | null | null |
functionaltests/common/config.py
|
kiall/designate-py3
|
2b135d64bb0ced77327a563e037b270d1e5ca308
|
[
"Apache-2.0"
] | null | null | null |
functionaltests/common/config.py
|
kiall/designate-py3
|
2b135d64bb0ced77327a563e037b270d1e5ca308
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from oslo_config import cfg
cfg.CONF.register_group(cfg.OptGroup(
name='identity', title="Configuration for Keystone auth"
))
cfg.CONF.register_group(cfg.OptGroup(
name='noauth', title="Configuration to run tests without Keystone"
))
cfg.CONF.register_opts([
cfg.StrOpt('designate_endpoint_override',
help="Endpoint to use to bypass Keystone auth"),
cfg.StrOpt('uri', help="The Keystone v2 endpoint"),
cfg.StrOpt('uri_v3', help="The Keystone v3 endpoint"),
cfg.StrOpt('auth_version', default='v2'),
cfg.StrOpt('region', default='RegionOne'),
cfg.StrOpt('username'),
cfg.StrOpt('tenant_name'),
cfg.StrOpt('password', secret=True),
cfg.StrOpt('domain_name'),
cfg.StrOpt('alt_username'),
cfg.StrOpt('alt_tenant_name'),
cfg.StrOpt('alt_password', secret=True),
cfg.StrOpt('alt_domain_name'),
cfg.StrOpt('admin_username'),
cfg.StrOpt('admin_tenant_name'),
cfg.StrOpt('admin_password', secret=True),
cfg.StrOpt('admin_domain_name'),
], group='identity')
cfg.CONF.register_opts([
cfg.StrOpt('designate_endpoint', help="The Designate API endpoint"),
cfg.StrOpt('tenant_id', default='noauth-project'),
cfg.StrOpt('alt_tenant_id', default='alt-project'),
cfg.StrOpt('admin_tenant_id', default='admin-project'),
cfg.BoolOpt('use_noauth', default=False),
], group='noauth')
def find_config_file():
return os.environ.get('TEMPEST_CONFIG', 'tempest.conf')
def read_config():
cfg.CONF(args=[], default_config_files=[find_config_file()])
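# Illustrative usage sketch (not part of the original module): after read_config() has
# been called, the options registered above are available on cfg.CONF, e.g.
#
#     read_config()
#     if cfg.CONF.noauth.use_noauth:
#         endpoint = cfg.CONF.noauth.designate_endpoint
#     else:
#         endpoint = cfg.CONF.identity.uri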
| 30.768116
| 72
| 0.719736
|
e0b33d3e8a5888f0b65e653bc1da7c2b692ecd5e
| 952
|
py
|
Python
|
session01_Decorators/listize.py
|
morales-gregorio/Python-Module-of-the-Week
|
2c68e20be3e174be9b91c92ac872806dd982e7d2
|
[
"MIT"
] | 15
|
2017-06-22T11:57:38.000Z
|
2022-03-31T13:34:07.000Z
|
session01_Decorators/listize.py
|
morales-gregorio/Python-Module-of-the-Week
|
2c68e20be3e174be9b91c92ac872806dd982e7d2
|
[
"MIT"
] | 3
|
2019-10-16T10:32:55.000Z
|
2020-01-09T09:24:48.000Z
|
session01_Decorators/listize.py
|
morales-gregorio/Python-Module-of-the-Week
|
2c68e20be3e174be9b91c92ac872806dd982e7d2
|
[
"MIT"
] | 6
|
2016-10-07T12:50:24.000Z
|
2019-11-28T11:15:04.000Z
|
# -*- coding: utf-8 -*-
"""
Exercise: listize decorator
When a function returns a list of results, we might need
to gather those results in a list:
def lucky_numbers(n):
ans = []
for i in range(n):
if i % 7 != 0:
continue
if sum(int(digit) for digit in str(i)) % 3 != 0:
continue
ans.append(i)
return ans
This looks much nicer when written as a generator.
① Convert lucky_numbers to be a generator.
② Write a 'listize' decorator which gathers the results from a
generator and returns a list and use it to wrap the new lucky_numbers().
Subexercise: ③ Write an 'arrayize' decorator which returns the results
in a numpy array instead of a list.
>>> @listize
... def f():
... yield 1
... yield 2
>>> f()
[1, 2]
"""
import functools
def listize(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return functools.update_wrapper(wrapper, func)
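# One possible solution sketch for exercises ① and ③ described in the module docstring
# (the numpy dependency for ``arrayize`` is an assumption of this sketch).
def lucky_numbers(n):
    """Exercise ①: generator version — yield multiples of 7 whose digit sum is divisible by 3."""
    for i in range(n):
        if i % 7 != 0:
            continue
        if sum(int(digit) for digit in str(i)) % 3 != 0:
            continue
        yield i
# Exercise ②: lucky_numbers = listize(lucky_numbers) restores the list-returning behaviour.
def arrayize(func):
    """Exercise ③: gather a generator's results into a numpy array instead of a list."""
    import numpy as np
    def wrapper(*args, **kwargs):
        return np.array(list(func(*args, **kwargs)))
    return functools.update_wrapper(wrapper, func)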
| 22.666667
| 72
| 0.644958
|
630c07f2d4f3a7ce21e02417730706bba19a329b
| 36,765
|
py
|
Python
|
third_party/syntaxnet/dragnn/protos/spec_pb2.py
|
anders-sandholm/sling
|
f3ab461cf378b31ad202cfa4892a007b85cf08af
|
[
"Apache-2.0"
] | 1
|
2020-03-16T05:03:21.000Z
|
2020-03-16T05:03:21.000Z
|
third_party/syntaxnet/dragnn/protos/spec_pb2.py
|
anders-sandholm/sling
|
f3ab461cf378b31ad202cfa4892a007b85cf08af
|
[
"Apache-2.0"
] | null | null | null |
third_party/syntaxnet/dragnn/protos/spec_pb2.py
|
anders-sandholm/sling
|
f3ab461cf378b31ad202cfa4892a007b85cf08af
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: third_party/syntaxnet/dragnn/protos/spec.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='third_party/syntaxnet/dragnn/protos/spec.proto',
package='syntaxnet.dragnn',
syntax='proto2',
serialized_pb=_b('\n.third_party/syntaxnet/dragnn/protos/spec.proto\x12\x10syntaxnet.dragnn\"X\n\nMasterSpec\x12\x32\n\tcomponent\x18\x01 \x03(\x0b\x32\x1f.syntaxnet.dragnn.ComponentSpecJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"\xe1\x03\n\rComponentSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x41\n\x11transition_system\x18\x02 \x01(\x0b\x32&.syntaxnet.dragnn.RegisteredModuleSpec\x12,\n\x08resource\x18\x03 \x03(\x0b\x32\x1a.syntaxnet.dragnn.Resource\x12<\n\rfixed_feature\x18\x04 \x03(\x0b\x32%.syntaxnet.dragnn.FixedFeatureChannel\x12>\n\x0elinked_feature\x18\x05 \x03(\x0b\x32&.syntaxnet.dragnn.LinkedFeatureChannel\x12<\n\x0cnetwork_unit\x18\x06 \x01(\x0b\x32&.syntaxnet.dragnn.RegisteredModuleSpec\x12\x37\n\x07\x62\x61\x63kend\x18\x07 \x01(\x0b\x32&.syntaxnet.dragnn.RegisteredModuleSpec\x12\x13\n\x0bnum_actions\x18\x08 \x01(\x05\x12\x41\n\x11\x63omponent_builder\x18\n \x01(\x0b\x32&.syntaxnet.dragnn.RegisteredModuleSpecJ\x04\x08\t\x10\n\"\xae\x01\n\x14RegisteredModuleSpec\x12\x17\n\x0fregistered_name\x18\x01 \x01(\t\x12J\n\nparameters\x18\x02 \x03(\x0b\x32\x36.syntaxnet.dragnn.RegisteredModuleSpec.ParametersEntry\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\">\n\x08Resource\x12\x0c\n\x04name\x18\x01 \x01(\t\x12$\n\x04part\x18\x02 \x03(\x0b\x32\x16.syntaxnet.dragnn.Part\"H\n\x04Part\x12\x14\n\x0c\x66ile_pattern\x18\x01 \x01(\t\x12\x13\n\x0b\x66ile_format\x18\x02 \x01(\t\x12\x15\n\rrecord_format\x18\x03 \x01(\t\"\xf5\x01\n\x13\x46ixedFeatureChannel\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03\x66ml\x18\x02 \x01(\t\x12\x15\n\rembedding_dim\x18\x03 \x01(\x05\x12\x17\n\x0fvocabulary_size\x18\x04 \x01(\x05\x12\x0c\n\x04size\x18\x05 \x01(\x05\x12\x13\n\x0bis_constant\x18\t \x01(\x08\x12?\n\x1bpretrained_embedding_matrix\x18\x07 \x01(\x0b\x32\x1a.syntaxnet.dragnn.Resource\x12)\n\x05vocab\x18\x08 \x01(\x0b\x32\x1a.syntaxnet.dragnn.ResourceJ\x04\x08\x06\x10\x07\"\xa1\x01\n\x14LinkedFeatureChannel\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03\x66ml\x18\x02 \x01(\t\x12\x15\n\rembedding_dim\x18\x03 \x01(\x05\x12\x0c\n\x04size\x18\x04 \x01(\x05\x12\x18\n\x10source_component\x18\x05 \x01(\t\x12\x19\n\x11source_translator\x18\x06 \x01(\t\x12\x14\n\x0csource_layer\x18\x07 \x01(\t\"\x99\x06\n\tGridPoint\x12\x1a\n\rlearning_rate\x18\x01 \x01(\x01:\x03\x30.1\x12\x15\n\x08momentum\x18\x02 \x01(\x01:\x03\x30.9\x12\x18\n\ndecay_base\x18\x10 \x01(\x01:\x04\x30.96\x12\x19\n\x0b\x64\x65\x63\x61y_steps\x18\x03 \x01(\x05:\x04\x31\x30\x30\x30\x12\x1d\n\x0f\x64\x65\x63\x61y_staircase\x18\x11 \x01(\x08:\x04true\x12\x0f\n\x04seed\x18\x04 \x01(\x05:\x01\x30\x12!\n\x0flearning_method\x18\x07 \x01(\t:\x08momentum\x12!\n\x12use_moving_average\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0e\x61verage_weight\x18\t \x01(\x01:\x06\x30.9999\x12\x17\n\x0c\x64ropout_rate\x18\n \x01(\x01:\x01\x31\x12\"\n\x16recurrent_dropout_rate\x18\x14 \x01(\x01:\x02-1\x12\x1d\n\x12gradient_clip_norm\x18\x0b \x01(\x01:\x01\x30\x12T\n\x18\x63omposite_optimizer_spec\x18\x0c \x01(\x0b\x32\x32.syntaxnet.dragnn.GridPoint.CompositeOptimizerSpec\x12\x18\n\nadam_beta1\x18\r \x01(\x01:\x04\x30.01\x12\x1a\n\nadam_beta2\x18\x0e \x01(\x01:\x06\x30.9999\x12\x17\n\x08\x61\x64\x61m_eps\x18\x0f \x01(\x01:\x05\x31\x65-08\x12-\n\x1dl2_regularization_coefficient\x18\x12 \x01(\x01:\x06\x30.0001\x12\x1a\n\x0fself_norm_alpha\x18\x13 \x01(\x01:\x01\x30\x12#\n\x1bself_norm_components_filter\x18\x15 
\x01(\t\x1a\x90\x01\n\x16\x43ompositeOptimizerSpec\x12,\n\x07method1\x18\x01 \x01(\x0b\x32\x1b.syntaxnet.dragnn.GridPoint\x12,\n\x07method2\x18\x02 \x01(\x0b\x32\x1b.syntaxnet.dragnn.GridPoint\x12\x1a\n\x12switch_after_steps\x18\x03 \x01(\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"j\n\x0bTrainTarget\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63omponent_weights\x18\x02 \x03(\x01\x12\x1b\n\x13unroll_using_oracle\x18\x03 \x03(\x08\x12\x15\n\tmax_index\x18\x04 \x01(\x05:\x02-1')
)
_MASTERSPEC = _descriptor.Descriptor(
name='MasterSpec',
full_name='syntaxnet.dragnn.MasterSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='component', full_name='syntaxnet.dragnn.MasterSpec.component', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=156,
)
_COMPONENTSPEC = _descriptor.Descriptor(
name='ComponentSpec',
full_name='syntaxnet.dragnn.ComponentSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='syntaxnet.dragnn.ComponentSpec.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transition_system', full_name='syntaxnet.dragnn.ComponentSpec.transition_system', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='resource', full_name='syntaxnet.dragnn.ComponentSpec.resource', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fixed_feature', full_name='syntaxnet.dragnn.ComponentSpec.fixed_feature', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='linked_feature', full_name='syntaxnet.dragnn.ComponentSpec.linked_feature', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='network_unit', full_name='syntaxnet.dragnn.ComponentSpec.network_unit', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='backend', full_name='syntaxnet.dragnn.ComponentSpec.backend', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_actions', full_name='syntaxnet.dragnn.ComponentSpec.num_actions', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='component_builder', full_name='syntaxnet.dragnn.ComponentSpec.component_builder', index=8,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=640,
)
_REGISTEREDMODULESPEC_PARAMETERSENTRY = _descriptor.Descriptor(
name='ParametersEntry',
full_name='syntaxnet.dragnn.RegisteredModuleSpec.ParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='syntaxnet.dragnn.RegisteredModuleSpec.ParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='syntaxnet.dragnn.RegisteredModuleSpec.ParametersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=768,
serialized_end=817,
)
_REGISTEREDMODULESPEC = _descriptor.Descriptor(
name='RegisteredModuleSpec',
full_name='syntaxnet.dragnn.RegisteredModuleSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='registered_name', full_name='syntaxnet.dragnn.RegisteredModuleSpec.registered_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='syntaxnet.dragnn.RegisteredModuleSpec.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_REGISTEREDMODULESPEC_PARAMETERSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=643,
serialized_end=817,
)
_RESOURCE = _descriptor.Descriptor(
name='Resource',
full_name='syntaxnet.dragnn.Resource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='syntaxnet.dragnn.Resource.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='part', full_name='syntaxnet.dragnn.Resource.part', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=819,
serialized_end=881,
)
_PART = _descriptor.Descriptor(
name='Part',
full_name='syntaxnet.dragnn.Part',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_pattern', full_name='syntaxnet.dragnn.Part.file_pattern', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_format', full_name='syntaxnet.dragnn.Part.file_format', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='record_format', full_name='syntaxnet.dragnn.Part.record_format', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=883,
serialized_end=955,
)
_FIXEDFEATURECHANNEL = _descriptor.Descriptor(
name='FixedFeatureChannel',
full_name='syntaxnet.dragnn.FixedFeatureChannel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='syntaxnet.dragnn.FixedFeatureChannel.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fml', full_name='syntaxnet.dragnn.FixedFeatureChannel.fml', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='embedding_dim', full_name='syntaxnet.dragnn.FixedFeatureChannel.embedding_dim', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vocabulary_size', full_name='syntaxnet.dragnn.FixedFeatureChannel.vocabulary_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='size', full_name='syntaxnet.dragnn.FixedFeatureChannel.size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_constant', full_name='syntaxnet.dragnn.FixedFeatureChannel.is_constant', index=5,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pretrained_embedding_matrix', full_name='syntaxnet.dragnn.FixedFeatureChannel.pretrained_embedding_matrix', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vocab', full_name='syntaxnet.dragnn.FixedFeatureChannel.vocab', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=958,
serialized_end=1203,
)
_LINKEDFEATURECHANNEL = _descriptor.Descriptor(
name='LinkedFeatureChannel',
full_name='syntaxnet.dragnn.LinkedFeatureChannel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='syntaxnet.dragnn.LinkedFeatureChannel.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fml', full_name='syntaxnet.dragnn.LinkedFeatureChannel.fml', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='embedding_dim', full_name='syntaxnet.dragnn.LinkedFeatureChannel.embedding_dim', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='size', full_name='syntaxnet.dragnn.LinkedFeatureChannel.size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_component', full_name='syntaxnet.dragnn.LinkedFeatureChannel.source_component', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_translator', full_name='syntaxnet.dragnn.LinkedFeatureChannel.source_translator', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_layer', full_name='syntaxnet.dragnn.LinkedFeatureChannel.source_layer', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1206,
serialized_end=1367,
)
_GRIDPOINT_COMPOSITEOPTIMIZERSPEC = _descriptor.Descriptor(
name='CompositeOptimizerSpec',
full_name='syntaxnet.dragnn.GridPoint.CompositeOptimizerSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='method1', full_name='syntaxnet.dragnn.GridPoint.CompositeOptimizerSpec.method1', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='method2', full_name='syntaxnet.dragnn.GridPoint.CompositeOptimizerSpec.method2', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='switch_after_steps', full_name='syntaxnet.dragnn.GridPoint.CompositeOptimizerSpec.switch_after_steps', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2007,
serialized_end=2151,
)
_GRIDPOINT = _descriptor.Descriptor(
name='GridPoint',
full_name='syntaxnet.dragnn.GridPoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='syntaxnet.dragnn.GridPoint.learning_rate', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum', full_name='syntaxnet.dragnn.GridPoint.momentum', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.9),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay_base', full_name='syntaxnet.dragnn.GridPoint.decay_base', index=2,
number=16, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.96),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay_steps', full_name='syntaxnet.dragnn.GridPoint.decay_steps', index=3,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay_staircase', full_name='syntaxnet.dragnn.GridPoint.decay_staircase', index=4,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='seed', full_name='syntaxnet.dragnn.GridPoint.seed', index=5,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='learning_method', full_name='syntaxnet.dragnn.GridPoint.learning_method', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("momentum").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_moving_average', full_name='syntaxnet.dragnn.GridPoint.use_moving_average', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='average_weight', full_name='syntaxnet.dragnn.GridPoint.average_weight', index=8,
number=9, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.9999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_rate', full_name='syntaxnet.dragnn.GridPoint.dropout_rate', index=9,
number=10, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='recurrent_dropout_rate', full_name='syntaxnet.dragnn.GridPoint.recurrent_dropout_rate', index=10,
number=20, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gradient_clip_norm', full_name='syntaxnet.dragnn.GridPoint.gradient_clip_norm', index=11,
number=11, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='composite_optimizer_spec', full_name='syntaxnet.dragnn.GridPoint.composite_optimizer_spec', index=12,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='adam_beta1', full_name='syntaxnet.dragnn.GridPoint.adam_beta1', index=13,
number=13, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.01),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='adam_beta2', full_name='syntaxnet.dragnn.GridPoint.adam_beta2', index=14,
number=14, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.9999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='adam_eps', full_name='syntaxnet.dragnn.GridPoint.adam_eps', index=15,
number=15, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(1e-08),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='l2_regularization_coefficient', full_name='syntaxnet.dragnn.GridPoint.l2_regularization_coefficient', index=16,
number=18, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.0001),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='self_norm_alpha', full_name='syntaxnet.dragnn.GridPoint.self_norm_alpha', index=17,
number=19, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='self_norm_components_filter', full_name='syntaxnet.dragnn.GridPoint.self_norm_components_filter', index=18,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GRIDPOINT_COMPOSITEOPTIMIZERSPEC, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1370,
serialized_end=2163,
)
_TRAINTARGET = _descriptor.Descriptor(
name='TrainTarget',
full_name='syntaxnet.dragnn.TrainTarget',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='syntaxnet.dragnn.TrainTarget.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='component_weights', full_name='syntaxnet.dragnn.TrainTarget.component_weights', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unroll_using_oracle', full_name='syntaxnet.dragnn.TrainTarget.unroll_using_oracle', index=2,
number=3, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_index', full_name='syntaxnet.dragnn.TrainTarget.max_index', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2165,
serialized_end=2271,
)
_MASTERSPEC.fields_by_name['component'].message_type = _COMPONENTSPEC
_COMPONENTSPEC.fields_by_name['transition_system'].message_type = _REGISTEREDMODULESPEC
_COMPONENTSPEC.fields_by_name['resource'].message_type = _RESOURCE
_COMPONENTSPEC.fields_by_name['fixed_feature'].message_type = _FIXEDFEATURECHANNEL
_COMPONENTSPEC.fields_by_name['linked_feature'].message_type = _LINKEDFEATURECHANNEL
_COMPONENTSPEC.fields_by_name['network_unit'].message_type = _REGISTEREDMODULESPEC
_COMPONENTSPEC.fields_by_name['backend'].message_type = _REGISTEREDMODULESPEC
_COMPONENTSPEC.fields_by_name['component_builder'].message_type = _REGISTEREDMODULESPEC
_REGISTEREDMODULESPEC_PARAMETERSENTRY.containing_type = _REGISTEREDMODULESPEC
_REGISTEREDMODULESPEC.fields_by_name['parameters'].message_type = _REGISTEREDMODULESPEC_PARAMETERSENTRY
_RESOURCE.fields_by_name['part'].message_type = _PART
_FIXEDFEATURECHANNEL.fields_by_name['pretrained_embedding_matrix'].message_type = _RESOURCE
_FIXEDFEATURECHANNEL.fields_by_name['vocab'].message_type = _RESOURCE
_GRIDPOINT_COMPOSITEOPTIMIZERSPEC.fields_by_name['method1'].message_type = _GRIDPOINT
_GRIDPOINT_COMPOSITEOPTIMIZERSPEC.fields_by_name['method2'].message_type = _GRIDPOINT
_GRIDPOINT_COMPOSITEOPTIMIZERSPEC.containing_type = _GRIDPOINT
_GRIDPOINT.fields_by_name['composite_optimizer_spec'].message_type = _GRIDPOINT_COMPOSITEOPTIMIZERSPEC
DESCRIPTOR.message_types_by_name['MasterSpec'] = _MASTERSPEC
DESCRIPTOR.message_types_by_name['ComponentSpec'] = _COMPONENTSPEC
DESCRIPTOR.message_types_by_name['RegisteredModuleSpec'] = _REGISTEREDMODULESPEC
DESCRIPTOR.message_types_by_name['Resource'] = _RESOURCE
DESCRIPTOR.message_types_by_name['Part'] = _PART
DESCRIPTOR.message_types_by_name['FixedFeatureChannel'] = _FIXEDFEATURECHANNEL
DESCRIPTOR.message_types_by_name['LinkedFeatureChannel'] = _LINKEDFEATURECHANNEL
DESCRIPTOR.message_types_by_name['GridPoint'] = _GRIDPOINT
DESCRIPTOR.message_types_by_name['TrainTarget'] = _TRAINTARGET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MasterSpec = _reflection.GeneratedProtocolMessageType('MasterSpec', (_message.Message,), dict(
DESCRIPTOR = _MASTERSPEC,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.MasterSpec)
))
_sym_db.RegisterMessage(MasterSpec)
ComponentSpec = _reflection.GeneratedProtocolMessageType('ComponentSpec', (_message.Message,), dict(
DESCRIPTOR = _COMPONENTSPEC,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.ComponentSpec)
))
_sym_db.RegisterMessage(ComponentSpec)
RegisteredModuleSpec = _reflection.GeneratedProtocolMessageType('RegisteredModuleSpec', (_message.Message,), dict(
ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _REGISTEREDMODULESPEC_PARAMETERSENTRY,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.RegisteredModuleSpec.ParametersEntry)
))
,
DESCRIPTOR = _REGISTEREDMODULESPEC,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.RegisteredModuleSpec)
))
_sym_db.RegisterMessage(RegisteredModuleSpec)
_sym_db.RegisterMessage(RegisteredModuleSpec.ParametersEntry)
Resource = _reflection.GeneratedProtocolMessageType('Resource', (_message.Message,), dict(
DESCRIPTOR = _RESOURCE,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.Resource)
))
_sym_db.RegisterMessage(Resource)
Part = _reflection.GeneratedProtocolMessageType('Part', (_message.Message,), dict(
DESCRIPTOR = _PART,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.Part)
))
_sym_db.RegisterMessage(Part)
FixedFeatureChannel = _reflection.GeneratedProtocolMessageType('FixedFeatureChannel', (_message.Message,), dict(
DESCRIPTOR = _FIXEDFEATURECHANNEL,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.FixedFeatureChannel)
))
_sym_db.RegisterMessage(FixedFeatureChannel)
LinkedFeatureChannel = _reflection.GeneratedProtocolMessageType('LinkedFeatureChannel', (_message.Message,), dict(
DESCRIPTOR = _LINKEDFEATURECHANNEL,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.LinkedFeatureChannel)
))
_sym_db.RegisterMessage(LinkedFeatureChannel)
GridPoint = _reflection.GeneratedProtocolMessageType('GridPoint', (_message.Message,), dict(
CompositeOptimizerSpec = _reflection.GeneratedProtocolMessageType('CompositeOptimizerSpec', (_message.Message,), dict(
DESCRIPTOR = _GRIDPOINT_COMPOSITEOPTIMIZERSPEC,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.GridPoint.CompositeOptimizerSpec)
))
,
DESCRIPTOR = _GRIDPOINT,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.GridPoint)
))
_sym_db.RegisterMessage(GridPoint)
_sym_db.RegisterMessage(GridPoint.CompositeOptimizerSpec)
TrainTarget = _reflection.GeneratedProtocolMessageType('TrainTarget', (_message.Message,), dict(
DESCRIPTOR = _TRAINTARGET,
__module__ = 'third_party.syntaxnet.dragnn.protos.spec_pb2'
# @@protoc_insertion_point(class_scope:syntaxnet.dragnn.TrainTarget)
))
_sym_db.RegisterMessage(TrainTarget)
_REGISTEREDMODULESPEC_PARAMETERSENTRY.has_options = True
_REGISTEREDMODULESPEC_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| 44.835366
| 3,978
| 0.746362
|
881abbd8eb85e291839aed465de65343ddaf3e92
| 2,671
|
py
|
Python
|
gradePredicition.py
|
wriggs12/Machine-Learning-Practice
|
6119a30adb68b296ecb5122731b2dd34cb6deeef
|
[
"MIT"
] | 1
|
2021-11-09T01:33:20.000Z
|
2021-11-09T01:33:20.000Z
|
gradePredicition.py
|
wriggs12/Machine-Learning-Practice
|
6119a30adb68b296ecb5122731b2dd34cb6deeef
|
[
"MIT"
] | 1
|
2021-10-30T22:11:15.000Z
|
2021-10-30T22:11:15.000Z
|
gradePredicition.py
|
wriggs12/Machine-Learning-Practice
|
6119a30adb68b296ecb5122731b2dd34cb6deeef
|
[
"MIT"
] | null | null | null |
#import helper libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
#Split data into training and testing sets
def splitTrainTest(features, labels):
return train_test_split(features, labels, test_size=0.25)
#Train the model using the training data
def trainModel(features, labels):
#Split data into test and training sets
xTrain, xTest, yTrain, yTest = splitTrainTest(features, labels)
#Create and train decision tree classifier
classifier = tree.DecisionTreeClassifier()
classifier.fit(xTrain, yTrain)
#Predict using the test data
predictions = classifier.predict(xTest)
#See the accuracy of the model
print(accuracy_score(yTest, predictions))
#Generates and displays heat map
def generateHeatMap(data):
sns.set_style('whitegrid')
corr = data.corr()
plt.figure(figsize=(10, 10))
sns.heatmap(corr, annot=True, cmap="Reds")
plt.title('Correlation Heatmap', fontsize=20)
plt.show()
#Main Program
def main():
print("Student Scores Predictions")
#input data from csv file
data = pd.read_csv('./Data/student-mat.csv')
#Label all features with numbers
labeler = LabelEncoder()
for column in data[
["school", "sex", "address", "famsize", "Pstatus", "Mjob", "Fjob", "reason", "guardian", "schoolsup", "famsup",
"paid", "activities", "nursery", "higher", "internet", "romantic"]].columns:
data[column] = labeler.fit_transform(data[column].values)
#Change grades to pass fail
data.loc[data['G3'] < 10, ['G3']] = 0
data.loc[data['G3'] >= 10, ['G3']] = 1
data.loc[data['G2'] < 10, ['G2']] = 0
data.loc[data['G2'] >= 10, ['G2']] = 1
data.loc[data['G1'] < 10, ['G1']] = 0
data.loc[data['G1'] >= 10, ['G1']] = 1
    #Separate the features from the labels
label = data.pop('G3')
features = data
print("\nModel Accuracy Knowing G1 & G2 Scores")
print("----------------------------------------")
trainModel(features, label)
features.drop(['G2'], axis=1, inplace=True)
print("\nModel Accuracy Knowing Only G1 Score")
print("----------------------------------------")
trainModel(features, label)
features.drop(['G1'], axis=1, inplace=True)
print("\nModel Accuracy Without Knowing Scores")
print("----------------------------------------")
trainModel(features, label)
if __name__ == '__main__':
main()
| 33.3875
| 120
| 0.622613
|
3f5d294788e51df469e75c713c5cc163ce89a1ce
| 5,481
|
py
|
Python
|
data_preperation/5_scenario_WP_in_radius.py
|
lvkoppen/TanzaniaWaterPredictions
|
3735a1b3964a2db8e8847a669ede5c78243c926d
|
[
"MIT"
] | 1
|
2021-09-20T10:59:36.000Z
|
2021-09-20T10:59:36.000Z
|
data_preperation/5_scenario_WP_in_radius.py
|
lvkoppen/TanzaniaWaterPredictions
|
3735a1b3964a2db8e8847a669ede5c78243c926d
|
[
"MIT"
] | null | null | null |
data_preperation/5_scenario_WP_in_radius.py
|
lvkoppen/TanzaniaWaterPredictions
|
3735a1b3964a2db8e8847a669ede5c78243c926d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import sys
import numpy as np
import pprint as pp
import geopy.distance
from tqdm import tqdm
import timeit
import json
from multiprocessing import Manager, Pool
from functools import partial
pp = pp.PrettyPrinter(indent=2)
def get_path_to_data():
working_directory = os.path.split(os.getcwd())[0]
general_directory = os.path.split(working_directory)[0]
data_location = os.path.join(general_directory, "data")
trainingdataset = "trainingsetvalues" +".csv"
testdataset = "testsetvalues" + ".csv"
trainds = os.path.join(data_location, trainingdataset)
testds = os.path.join(data_location, testdataset)
path_dt= {"train": trainds, "test": testds}
return path_dt
def get_path_to_place_data():
working_directory = os.path.split(os.getcwd())[0]
general_directory = os.path.split(working_directory)[0]
data_location = os.path.join(general_directory, "data")
prepped_data_folder = os.path.join(data_location, "prepped_data")
prepped_file = '5_test_data' + ".csv"
return os.path.join(prepped_data_folder, prepped_file)
def get_data(path):
return pd.read_csv(path)
#standard columns to drop
#concatenation of source_type and extraction_type_group
def prepare_data(df_fresh):
df = df_fresh
df['source_extraction_type'] = df['source_type']+df['extraction_type_group']
df['gps_height'] = df['gps_height'].abs()
#replacing missing values with NaN type and incorrect values(zeroes)
df['construction_year'].replace(0, np.nan, inplace= True)
df['longitude'].replace(0, np.nan, inplace= True)
df['latitude'] = df['latitude'].apply(lambda x: np.where(x > -0.98, np.nan, x))
df["gps_height"]=df['gps_height'].apply(lambda x: np.where(x < 0.1, np.nan, x))
df['gps_height'] = df.groupby('subvillage', dropna = False)['gps_height'].transform(lambda x: x.fillna(x.mean()))
df['gps_height'] = df.groupby('ward', dropna = False)['gps_height'].transform(lambda x: x.fillna(x.mean()))
df['gps_height'] = df.groupby('region', dropna = False)['gps_height'].transform(lambda x: x.fillna(x.mean()))
df['date_recorded'] = pd.to_datetime(df["date_recorded"], format = '%Y-%m-%d', errors = 'coerce')
df.dropna(subset=['latitude', 'longitude'], inplace=True)
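    # Impute construction_year with the group median, moving from the finest grouping (subvillage, extraction_type) to the coarsest (region, extraction_type).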
df['construction_year'] = df.groupby(['subvillage','extraction_type'], dropna = False)['construction_year'].transform(lambda x: x.fillna(x.median()))
df['construction_year'] = df.groupby(['ward','extraction_type'], dropna = False)['construction_year'].transform(lambda x: x.fillna(x.median()))
df['construction_year'] = df.groupby(['region','extraction_type'], dropna = False)['construction_year'].transform(lambda x: x.fillna(x.median()))
df["construction_year"]=df['construction_year'].apply(lambda x: np.where(x < 0, np.nan, x))
df['construction_year']= df['construction_year'].round()
df['construction_year'].replace(0, np.nan, inplace= True)
df.dropna(subset= ['construction_year'], inplace=True)
df['waterpoint_age'] = df['date_recorded'].dt.year - df['construction_year']
df.drop(df.loc[df['waterpoint_age'] < 0].index, inplace=True)
return df.copy()
def prepare_data_for_gps(df):
df_cols = df[['id','longitude', 'latitude']]
df_cols.set_index('id', inplace=True)
df_partial = df_cols.sort_values(by=['longitude', 'latitude'])
df_partial['latlon'] = list(zip(df_partial['latitude'], df_partial['longitude']))
return df_partial.copy()
def func_multiple_arguments(n, m, *args, **kwargs):
return n, m
def mp_execution(df_partial, id):
row_data = df_partial.loc[id,:]
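    # Coarse prefilter: keep only candidates inside a +/- 1 degree lat/lon box before computing exact geodesic distances.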
mask = (df_partial['latitude'].between(row_data.latlon[0]-1, row_data.latlon[0]+1) &
df_partial['longitude'].between(row_data.latlon[1]-1, row_data.latlon[1]+1))
id_set = df_partial.index[mask]
within_range = set()
for ix in id_set:
if ix != id:
r_value = df_partial.loc[ix,:]
distance = geopy.distance.distance(row_data.latlon ,r_value.latlon)
if distance <= 10:
within_range.add(ix)
return id, within_range
def main(df, ids):
df_partial = df
def get_nearby_waterpoints():
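        # Fan the waterpoint ids out over 11 worker processes; each worker returns the set of ids within 10 km.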
pool = Pool(11)
results = tqdm(pool.imap_unordered(partial(mp_execution, df_partial), list(ids), chunksize=40), total= len(ids))
for id, l in results:
nearby_dict[id] = list(l)
with open('test_nearbydict.json', 'w') as fp:
json.dump(dict(nearby_dict), fp)
manager = Manager()
nearby_dict = manager.dict()
start = timeit.default_timer()
get_nearby_waterpoints()
end = timeit.default_timer()
total = end - start
print('Total time: {}'.format(total))
#df.to_csv(prepped_data_file_location, index= False)
if __name__ == "__main__":
path_to_data = get_path_to_data()
    df_test = None
    df_train = None
for key,value in path_to_data.items():
if (key == "train"):
df_train = get_data(value)
elif (key == "test"):
df_test = get_data(value)
combined_df = pd.concat([df_test, df_train])
df2 = prepare_data(combined_df)
mask_df = df2.id.isin(df_test.id)
df_final = prepare_data_for_gps(df2)
df_test_index = df2.id[mask_df]
main(df_final, df_test_index.to_list())
| 30.966102
| 153
| 0.665937
|
fe5c904340442187e9408991407f9b68528a400d
| 7,957
|
py
|
Python
|
release/stubs.min/System/Windows/Media/Animation_parts/ElasticEase.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Media/Animation_parts/ElasticEase.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Media/Animation_parts/ElasticEase.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class ElasticEase(EasingFunctionBase, ISealable, IEasingFunction):
"""
Represents an easing function that creates an animation that resembles a spring oscillating back and forth until it comes to rest.
ElasticEase()
"""
def CloneCore(self, *args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValueCore(self, *args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self, *args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self, *args):
"""
CreateInstanceCore(self: ElasticEase) -> Freezable
Creates a new instance of the System.Windows.Freezable derived class. When creating a derived
class,you must override this method.
Returns: The new instance.
"""
pass
def EaseInCore(self, *args):
"""
EaseInCore(self: ElasticEase,normalizedTime: float) -> float
Provides the logic portion of the easing function that you can override to produce the
System.Windows.Media.Animation.EasingMode.EaseIn mode of the custom easing function.
normalizedTime: Normalized time (progress) of the animation.
Returns: A double that represents the transformed progress.
"""
pass
def FreezeCore(self, *args):
"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
        returns true if the specified System.Windows.Freezable is now unmodifiable,or false if
it cannot be made unmodifiable.
"""
pass
def GetAsFrozenCore(self, *args):
"""
GetAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a frozen clone of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The instance to copy.
"""
pass
def GetCurrentValueAsFrozenCore(self, *args):
"""
GetCurrentValueAsFrozenCore(self: Freezable,sourceFreezable: Freezable)
Makes the current instance a frozen clone of the specified System.Windows.Freezable. If the
object has animated dependency properties,their current animated values are copied.
sourceFreezable: The System.Windows.Freezable to copy and freeze.
"""
pass
def OnChanged(self, *args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self, *args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self, *args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self, *args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def ShouldSerializeProperty(self, *args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self, *args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self, *args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Oscillations = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the number of times the target slides back and forth over the animation destination.
Get: Oscillations(self: ElasticEase) -> int
Set: Oscillations(self: ElasticEase)=value
"""
Springiness = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets or sets the stiffness of the spring. The smaller the Springiness value is,the stiffer the spring and the faster the elasticity decreases in intensity over each oscillation.
Get: Springiness(self: ElasticEase) -> float
Set: Springiness(self: ElasticEase)=value
"""
OscillationsProperty = None
SpringinessProperty = None
| 25.834416
| 221
| 0.686817
|
e0f91717bfecfd0c6c543a04c1949cee414f92b9
| 635
|
py
|
Python
|
tests/unit/shared/test_initialize_snow.py
|
lingyunan0510/VIC
|
dbc00a813b5df5a88027d1dc57a7805e9a464436
|
[
"MIT"
] | 1
|
2022-01-18T01:23:47.000Z
|
2022-01-18T01:23:47.000Z
|
tests/unit/shared/test_initialize_snow.py
|
yusheng-wang/VIC
|
8f6cc0661bdc67c4f6caabdd4dcd0b8782517435
|
[
"MIT"
] | null | null | null |
tests/unit/shared/test_initialize_snow.py
|
yusheng-wang/VIC
|
8f6cc0661bdc67c4f6caabdd4dcd0b8782517435
|
[
"MIT"
] | null | null | null |
# import pytest
# from vic import lib as vic_lib
# @pytest.fixture()
# def veg_num(scope='function'):
# return 5
# @pytest.fixture()
# def snow(veg_num, scope='function'):
# return vic_lib.make_snow_data(veg_num)
# def test_initialize_snow():
# raise NotImplementedError('problems here in test_initialize_snow.py')
# veg_num = 5
# snow = vic_lib.make_snow_data(veg_num)
# assert vic_lib.initialize_snow(snow, 5) is None
# assert snow[0][0].albedo == 0.
# def test_initialize_snow_bools(snow, veg_num):
# assert vic_lib.initialize_snow(snow, veg_num) is None
# assert not snow[0][0].MELTING
| 23.518519
| 75
| 0.696063
|
50dbe9fac95ed38331030aada231ddd750a72aeb
| 20,761
|
py
|
Python
|
analysis/checks/collection/gps_checks.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 1,178
|
2020-09-10T17:15:42.000Z
|
2022-03-31T14:59:35.000Z
|
analysis/checks/collection/gps_checks.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 1
|
2020-05-22T05:22:35.000Z
|
2020-05-22T05:22:35.000Z
|
analysis/checks/collection/gps_checks.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 107
|
2020-09-10T17:29:30.000Z
|
2022-03-18T09:00:14.000Z
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checklist items for GPS."""
from makani.analysis.checks import base_check
from makani.analysis.checks import check_range
from makani.avionics.common import novatel_types
from makani.avionics.common import septentrio_types
from makani.lib.python import c_helpers
import numpy
NOVATEL_SOLUTION_STATUS_HELPER = c_helpers.EnumHelper(
'NovAtelSolutionStatus', novatel_types)
SEPTENTRIO_ERROR_HELPER = c_helpers.EnumHelper(
'SeptentrioPvtError', septentrio_types)
# These are the aio_nodes for NovAtel and Septentrio
NOVATEL_SOURCES = ['FcA', 'FcB']
SEPTENTRIO_SOURCES = []
# These are the upper bounds of the normal and warning ranges
# for pos_sigma and vel_sigma. If a value is higher than the upper bound
# of the warning range, then we will throw an error.
POS_SIGMA_NORMAL_UPPER = 0.2
POS_SIGMA_WARNING_UPPER = 0.5
VEL_SIGMA_NORMAL_UPPER = 0.1
VEL_SIGMA_WARNING_UPPER = 0.3
class BaseNovAtelCheck(base_check.BaseCheckItem):
"""Base class for NovAtel checks."""
@base_check.RegisterSpecs
def __init__(self, for_log, source,
normal_ranges=check_range.AllInclusiveRange(),
warning_ranges=check_range.AllInclusiveRange()):
self._source = source
super(BaseNovAtelCheck, self).__init__(for_log, normal_ranges,
warning_ranges)
class BaseSeptentrioCheck(base_check.BaseCheckItem):
"""Base class for Septentrio checks."""
@base_check.RegisterSpecs
def __init__(self, for_log, source,
normal_ranges=check_range.AllInclusiveRange(),
warning_ranges=check_range.AllInclusiveRange()):
self._source = source
super(BaseSeptentrioCheck, self).__init__(for_log, normal_ranges,
warning_ranges)
class NovAtelSolutionTypeCheck(BaseNovAtelCheck):
"""Class to check NovAtel solution type."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
super(NovAtelSolutionTypeCheck, self).__init__(for_log, source)
def _RegisterInputs(self):
return [
self._Arg('NovAtelSolution', self._source, 'best_xyz.pos_sol_status')
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, solution_type):
"""Checks that the solution type is 'SolComputed' else raises warning."""
zero_range = check_range.Singleton(0)
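    # For each possible error status, build a 0/1 indicator per sample and flag any sample that falls outside the all-zero range.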
for error_name, error_value in NOVATEL_SOLUTION_STATUS_HELPER:
# Skip 'SolComputed' because that is what we want the status to be.
if error_value == 0:
continue
# Raise a warning if the status is equal to the error_value.
self._CheckForFailure(self._source + ' ' + error_name,
numpy.array([int(s == error_value) for s in
solution_type]),
zero_range, False)
class NovAtelDiffCheck(BaseNovAtelCheck):
"""Class to check the NovAtel diff_age."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
normal_ranges = check_range.Interval((None, 3))
warning_ranges = check_range.AllInclusiveRange()
super(NovAtelDiffCheck, self).__init__(for_log, source, normal_ranges,
warning_ranges)
def _RegisterInputs(self):
return [
self._Arg('NovAtelSolution', self._source, 'best_xyz.diff_age')
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, diff_age):
self._CheckByRange(self._source + ' NovAtel diff_age too high', diff_age,
self._normal_ranges, self._warning_ranges)
class NovAtelPosSigmaCheck(BaseNovAtelCheck):
"""Class to check position sigma for NovAtel GPS."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
normal_ranges = check_range.Interval((0, POS_SIGMA_NORMAL_UPPER))
warning_ranges = check_range.Interval((0, POS_SIGMA_WARNING_UPPER))
super(NovAtelPosSigmaCheck, self).__init__(for_log, source, normal_ranges,
warning_ranges)
def _RegisterInputs(self):
"""Register data to be used for the sigmas check."""
return [
self._Arg('NovAtelSolution', self._source, 'best_xyz.pos_x_sigma'),
self._Arg('NovAtelSolution', self._source, 'best_xyz.pos_y_sigma'),
self._Arg('NovAtelSolution', self._source, 'best_xyz.pos_z_sigma')
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, x, y, z):
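    # Combine the per-axis sigmas into one 3-D value via the root-sum-square (vector norm).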
if self._for_log:
length = len(x)
pos_sigma = numpy.array([numpy.linalg.norm([x[i], y[i], z[i]])
for i in xrange(length)])
else:
pos_sigma = numpy.linalg.norm([x, y, z])
self._CheckByRange(self._source + ' NovAtel pos_sigma out of range',
pos_sigma, self._normal_ranges, self._warning_ranges)
class NovAtelVelSigmaCheck(BaseNovAtelCheck):
"""Class to check velocity sigma for NovAtel GPS."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
normal_ranges = check_range.Interval((0, VEL_SIGMA_NORMAL_UPPER))
warning_ranges = check_range.Interval((0, VEL_SIGMA_WARNING_UPPER))
super(NovAtelVelSigmaCheck, self).__init__(for_log, source, normal_ranges,
warning_ranges)
def _RegisterInputs(self):
"""Register data to be used for the sigmas check."""
return [
self._Arg('NovAtelSolution', self._source, 'best_xyz.vel_x_sigma'),
self._Arg('NovAtelSolution', self._source, 'best_xyz.vel_y_sigma'),
self._Arg('NovAtelSolution', self._source, 'best_xyz.vel_z_sigma')
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, x, y, z):
if self._for_log:
length = len(x)
vel_sigma = numpy.array([numpy.linalg.norm([x[i], y[i], z[i]])
for i in xrange(length)])
else:
vel_sigma = numpy.linalg.norm([x, y, z])
self._CheckByRange(self._source + ' NovAtel vel_sigma out of range',
vel_sigma, self._normal_ranges, self._warning_ranges)
class SeptentrioPvtCartesianErrorCheck(BaseSeptentrioCheck):
"""Class to check Septentrio pvt_cartesian.error."""
@base_check.RegisterSpecs
  def __init__(self, for_log, source):
super(SeptentrioPvtCartesianErrorCheck, self).__init__(for_log, source)
def _RegisterInputs(self):
return [
self._Arg('SeptentrioSolution', self._source, 'pvt_cartesian.error')
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, status):
"""Checks that the solution type is 'SolComputed' else raises warning."""
zero_range = check_range.Singleton(0)
# Check all possible SeptentrioPvtError values.
for error_name, error_value in SEPTENTRIO_ERROR_HELPER:
# Skip the 'None' error.
if error_value == 0:
continue
# Throw a warning if the status is equal to the error_value
self._CheckForFailure(self._source + ' ' + error_name,
numpy.array([int(s == error_value)
for s in status]),
zero_range, False)
class SeptentrioMeanCorrAgeCheck(BaseSeptentrioCheck):
"""Class to check Septentrio pvt_cartesian._mean_corr_age."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
normal_ranges = check_range.Interval((None, 300))
warning_ranges = check_range.AllExclusiveRange()
super(SeptentrioMeanCorrAgeCheck, self).__init__(for_log, source,
normal_ranges,
warning_ranges)
def _RegisterInputs(self):
return [
self._Arg('SeptentrioSolution', self._source,
'pvt_cartesian.mean_corr_age')
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, mean_corr_age):
self._CheckByRange(self._source + ' Septentrio mean_corr_age too high',
mean_corr_age, self._normal_ranges,
self._warning_ranges)
class SeptentrioPosSigmaCheck(BaseSeptentrioCheck):
"""Class to check pos_sigma for Septentrio GPS."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
normal_ranges = check_range.Interval((0, POS_SIGMA_NORMAL_UPPER))
warning_ranges = check_range.Interval((0, POS_SIGMA_WARNING_UPPER))
super(SeptentrioPosSigmaCheck, self).__init__(for_log, source,
normal_ranges,
warning_ranges)
def _RegisterInputs(self):
"""Register data to be used for the sigmas check."""
return [
self._Arg('SeptentrioSolution', self._source,
'pos_cov_cartesian.cov_xx'),
self._Arg('SeptentrioSolution', self._source,
'pos_cov_cartesian.cov_yy'),
self._Arg('SeptentrioSolution', self._source,
'pos_cov_cartesian.cov_zz'),
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, x, y, z):
if self._for_log:
length = len(x)
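      # The Septentrio message carries covariances, so the combined sigma is sqrt(cov_xx + cov_yy + cov_zz).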
pos_sigma = numpy.array([numpy.sqrt(numpy.sum([x[i], y[i], z[i]]))
for i in xrange(length)])
else:
pos_sigma = numpy.linalg.norm([x, y, z])
self._CheckByRange(self._source + ' Septentrio pos_sigma out of range',
pos_sigma, self._normal_ranges, self._warning_ranges)
class SeptentrioVelSigmaCheck(BaseSeptentrioCheck):
"""Class to check pos_sigma for Septentrio GPS."""
@base_check.RegisterSpecs
def __init__(self, for_log, source):
normal_ranges = check_range.Interval((0, VEL_SIGMA_NORMAL_UPPER))
warning_ranges = check_range.Interval((0, VEL_SIGMA_WARNING_UPPER))
super(SeptentrioVelSigmaCheck, self).__init__(for_log, source,
normal_ranges,
warning_ranges)
def _RegisterInputs(self):
"""Register data to be used for the sigmas check."""
return [
self._Arg('SeptentrioSolution', self._source,
'vel_cov_cartesian.cov_xx'),
self._Arg('SeptentrioSolution', self._source,
'vel_cov_cartesian.cov_yy'),
self._Arg('SeptentrioSolution', self._source,
'vel_cov_cartesian.cov_zz'),
]
@base_check.SkipIfAnyInputIsNone
def _Check(self, x, y, z):
if self._for_log:
length = len(x)
vel_sigma = numpy.array([numpy.sqrt(numpy.sum([x[i], y[i], z[i]]))
for i in xrange(length)])
else:
vel_sigma = numpy.linalg.norm([x, y, z])
self._CheckByRange(self._source + ' Septentrio vel_sigma out of range',
vel_sigma, self._normal_ranges, self._warning_ranges)
class AgreementCheck(base_check.BaseCheckItem):
"""Class to check whether the two GPSes agree."""
@base_check.RegisterSpecs
def __init__(self, for_log, gps_type_per_source,
normal_ranges=check_range.AllInclusiveRange(),
warning_ranges=check_range.AllInclusiveRange()):
assert 'FcA' in gps_type_per_source and 'FcB' in gps_type_per_source
self._gps_type_per_source = gps_type_per_source
super(AgreementCheck, self).__init__(for_log, normal_ranges, warning_ranges)
def _RegisterInputs(self):
"""Register data to be used for the sigmas check."""
args = []
for source in ['FcA', 'FcB']:
gps_type = self._gps_type_per_source[source]
if gps_type == 'Septentrio':
args += [
self._Arg('SeptentrioSolution', source, 'pvt_cartesian.x'),
self._Arg('SeptentrioSolution', source, 'pvt_cartesian.y'),
self._Arg('SeptentrioSolution', source, 'pvt_cartesian.z'),
self._Arg('SeptentrioSolution', source, 'pvt_cartesian.mode'),
self._Arg('SeptentrioSolution', source,
'pvt_cartesian.timestamp.tow'),
]
elif gps_type == 'NovAtel':
args += [
self._Arg('NovAtelSolution', source, 'best_xyz.pos_x'),
self._Arg('NovAtelSolution', source, 'best_xyz.pos_y'),
self._Arg('NovAtelSolution', source, 'best_xyz.pos_z'),
self._Arg('NovAtelSolution', source, 'best_xyz.pos_type'),
self._Arg('NovAtelSolution', source, 'best_xyz.timestamp.tow'),
]
else:
assert False
return args
def _GetGpsModeAndValidity(self, gps_mode, gps_type):
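    # A solution is considered valid only when the receiver reports a fixed-integer (RTK) mode.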
if gps_type == 'Septentrio':
gps_mode &= septentrio_types.kSeptentrioPvtModeBitSolutionMask
gps_valid = (
gps_mode == septentrio_types.kSeptentrioPvtModeRtkFixed)
elif gps_type == 'NovAtel':
gps_valid = (
(gps_mode == novatel_types.kNovAtelSolutionTypeL1Int) |
(gps_mode == novatel_types.kNovAtelSolutionTypeNarrowInt) |
(gps_mode == novatel_types.kNovAtelSolutionTypeWideInt))
else:
assert False
return gps_mode, gps_valid
@base_check.SkipIfAnyInputIsNone
def _Check(self, fca_gps_x, fca_gps_y, fca_gps_z, fca_gps_mode,
fca_gps_tow, fcb_gps_x, fcb_gps_y, fcb_gps_z, fcb_gps_mode,
fcb_gps_tow):
fca_gps_mode, fca_gps_valid = self._GetGpsModeAndValidity(
fca_gps_mode, self._gps_type_per_source['FcA'])
fcb_gps_mode, fcb_gps_valid = self._GetGpsModeAndValidity(
fcb_gps_mode, self._gps_type_per_source['FcB'])
if (not self._for_log) and not (fca_gps_valid and fcb_gps_valid):
return
if self._for_log:
# We are using tow to find same-time updates on two GPSes.
# TODO: This needs to be timestamp (combining tow and week) when
# we start to have logs that span for more than 7 days.
fcb_gps_tow, fcb_gps_unique_index = numpy.unique(
fcb_gps_tow, return_index=True)
fca_gps_tow, fca_gps_unique_index = numpy.unique(
fca_gps_tow, return_index=True)
fcb_gps_overlap_mask = numpy.in1d(fcb_gps_tow, fca_gps_tow)
fcb_gps_x = fcb_gps_x[fcb_gps_unique_index][fcb_gps_overlap_mask]
fcb_gps_y = fcb_gps_y[fcb_gps_unique_index][fcb_gps_overlap_mask]
fcb_gps_z = fcb_gps_z[fcb_gps_unique_index][fcb_gps_overlap_mask]
fca_gps_overlap_mask = numpy.in1d(fca_gps_tow, fcb_gps_tow)
fca_gps_x = fca_gps_x[fca_gps_unique_index][fca_gps_overlap_mask]
fca_gps_y = fca_gps_y[fca_gps_unique_index][fca_gps_overlap_mask]
fca_gps_z = fca_gps_z[fca_gps_unique_index][fca_gps_overlap_mask]
assert fcb_gps_x.size == fca_gps_x.size
if not fcb_gps_x.size:
return
diff = numpy.sqrt(
(fcb_gps_x - fca_gps_x) ** 2 + (fcb_gps_y - fca_gps_y) ** 2 +
(fcb_gps_z - fca_gps_z) ** 2)
self._CheckByRange(
'GPS (%s) does not agree with GPS (%s)' % ('FcA', 'FcB'),
diff, self._normal_ranges, self._warning_ranges)
class BaseCn0Checker(base_check.BaseCheckItem):
"""The monitor to check GPS carrier-to-noise ratio."""
@base_check.RegisterSpecs
def __init__(self, for_log, message_type, source, cn0_field, num_field,
type_field, name):
"""Initialize the voltage checker for a given servo.
Args:
for_log: True if this check is performed over a log. False if it is for
realtime AIO messages.
message_type: Type of the message.
source: The AIO node that sent the message.
cn0_field: Path to the C/N0 field.
num_field: Path to the num_obs field.
type_field: The type to extract particular signals.
name: Name of the check item.
"""
self._message_type = message_type
self._source = source
self._cn0_field = cn0_field
self._num_field = num_field
self._type_field = type_field
super(BaseCn0Checker, self).__init__(for_log, name=name)
def Cn0Field(self):
return self._cn0_field
def NumField(self):
return self._num_field
def TypeField(self):
return self._type_field
def _GetValidCn0(self, cn0, type_bits):
"""Select and compute valid C/N0 values.
Args:
cn0: A NumPy array of raw Cn0 values.
type_bits: A NumPy array of signal types.
Returns:
A NumPy array of valid Cn0 values.
"""
raise NotImplementedError
def _RegisterInputs(self):
"""Register what fields will be used to calculate the check results."""
data = []
data.append(self._Arg(
self._message_type, self._source, self._cn0_field))
data.append(self._Arg(
self._message_type, self._source, self._num_field))
data.append(self._Arg(
self._message_type, self._source, self._type_field))
return data
def GetAvgAndMaxCn0FromTimeSeries(self, cn0, num_obs, type_bits):
avg_cn0s = []
max_cn0s = []
for n in range(cn0.shape[0]):
avg_cn0, max_cn0, _ = self.GetAvgAndMaxCn0(
cn0[n], num_obs[n], type_bits[n])
avg_cn0s.append(avg_cn0)
max_cn0s.append(max_cn0)
return numpy.array(avg_cn0s), numpy.array(max_cn0s)
def GetAvgAndMaxCn0(self, cn0, num_obs, type_bits):
if num_obs == 0:
return float('nan'), float('nan'), 0
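    # Trim the fixed-size message arrays down to the reported number of observations before filtering by signal type.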
cn0 = numpy.array(cn0[:num_obs])
type_bits = numpy.array(type_bits[:num_obs])
cn0 = self._GetValidCn0(cn0, type_bits)
if cn0.size:
return numpy.average(cn0), numpy.max(cn0), len(cn0)
else:
return float('nan'), float('nan'), 0
@base_check.SkipIfAnyInputIsNone
def _Check(self, cn0, num_obs, type_bits):
"""Check the carrier-to-noise ratio."""
if num_obs == 0:
return
if self._for_log:
avg_cn0, max_cn0 = self.GetAvgAndMaxCn0FromTimeSeries(
cn0, num_obs, type_bits)
else:
avg_cn0, max_cn0, _ = self.GetAvgAndMaxCn0(cn0, num_obs, type_bits)
avg_ranges = check_range.Interval([40.0, None])
max_ranges = check_range.Interval([45.0, None])
all_inclusive = check_range.AllInclusiveRange()
self._CheckByRange('%s (Avg)' % self._name, avg_cn0, avg_ranges,
all_inclusive)
self._CheckByRange('%s (Max)' % self._name, max_cn0, max_ranges,
all_inclusive)
class NovAtelCn0Checker(BaseCn0Checker):
"""Cn0 check for NovAtel receivers."""
@base_check.RegisterSpecs
def __init__(self, for_log, source, name):
# See NovAtel OEM6 documentation for RANGE log. Table 126: Channel
# Tracking Status on page 604 (rev 8) documents the meaning of the
# status bits.
super(NovAtelCn0Checker, self).__init__(
for_log, 'NovAtelObservations', source, 'range.cn0', 'range.num_obs',
'range.status_bits', name)
def _GetValidCn0(self, cn0, type_bits):
return cn0[(type_bits & 0x03E70000) == 0]
class SeptentrioCn0Checker(BaseCn0Checker):
"""Cn0 check for Septentrio receivers."""
@base_check.RegisterSpecs
def __init__(self, for_log, source, name):
# See Septentrio's SBF Reference Guide. Table 2.10: Signal Type.
super(SeptentrioCn0Checker, self).__init__(
for_log, 'SeptentrioObservations', source, 'meas_epoch.cn0',
'meas_epoch.num_obs', 'meas_epoch.type', name)
def _GetValidCn0(self, cn0, type_bits):
# See page 18 of AsteRx-m Firmware v3.3.0 SBF Reference Guide.
assert type_bits.itemsize == 8
# Check bits 0-4 to select only L1 C/A signals.
# Need to convert to float to avoid numpy's float->int casting restrictions.
valid_cn0 = cn0[(type_bits & 0x1F) == 0].astype(float)
valid_cn0 *= 0.25
valid_cn0 += 10.0
return valid_cn0
class GpsChecks(base_check.ListOfChecks):
"""The GPS checklist."""
def __init__(self, for_log):
self._items_to_check = [
AgreementCheck(
for_log, {'FcA': 'NovAtel', 'FcB': 'NovAtel'}, [[0.0, 1.0]]),
]
for source in NOVATEL_SOURCES:
self._items_to_check += [
NovAtelSolutionTypeCheck(for_log, source),
NovAtelDiffCheck(for_log, source),
NovAtelPosSigmaCheck(for_log, source),
NovAtelVelSigmaCheck(for_log, source),
]
for source in SEPTENTRIO_SOURCES:
self._items_to_check += [
SeptentrioPvtCartesianErrorCheck(for_log, source),
SeptentrioMeanCorrAgeCheck(for_log, source),
SeptentrioPosSigmaCheck(for_log, source),
SeptentrioVelSigmaCheck(for_log, source),
]
| 37.542495
| 80
| 0.667502
|
b3dbe9f8ca3316dd2aa563a7c72579345e64e36d
| 8,674
|
py
|
Python
|
pytrek/gui/HelpView.py
|
hasii2011/PyArcadeStarTrek
|
370edbb62f15f69322aa7f109d6d36ebf20cbe4a
|
[
"MIT"
] | 1
|
2021-06-13T00:56:24.000Z
|
2021-06-13T00:56:24.000Z
|
pytrek/gui/HelpView.py
|
hasii2011/PyArcadeStarTrek
|
370edbb62f15f69322aa7f109d6d36ebf20cbe4a
|
[
"MIT"
] | 94
|
2021-04-16T20:34:10.000Z
|
2022-01-13T19:58:20.000Z
|
pytrek/gui/HelpView.py
|
hasii2011/PyArcadeStarTrek
|
370edbb62f15f69322aa7f109d6d36ebf20cbe4a
|
[
"MIT"
] | null | null | null |
from typing import Dict
from typing import Callable
from logging import Logger
from logging import getLogger
from collections import namedtuple
from arcade import Texture
from arcade import View
from arcade import color
from arcade.gui import UIAnchorWidget
from arcade.gui import UIBoxLayout
from arcade.gui import UILabel
from arcade.gui import UIManager
from arcade.gui import UIMouseScrollEvent
from arcade.gui import UIOnClickEvent
from arcade.gui import UIPadding
from arcade.gui import UITextArea
from arcade.gui import UITextureButton
from arcade.gui import UITexturePane
from arcade import start_render
from arcade import load_texture
from pytrek.LocateResources import LocateResources
CreateTextResponse = namedtuple('CreateTextResponse', 'textArea, texturePane')
class HelpView(View):
FONT_NAME: str = 'UniverseCondensed'
def __init__(self, completeCallback: Callable):
super().__init__()
self.logger: Logger = getLogger(__name__)
self._completeCallback: Callable = completeCallback
self._uiManager: UIManager = UIManager()
self._uiManager.enable()
title: UILabel = self._createLabel(text='PyArcadeStarTrek Help', height=24, fontSize=18)
createTextResponse: CreateTextResponse = self._createHelpTextArea()
wrappedHelpTextArea: UITexturePane = createTextResponse.texturePane
self._helpTextArea: UITextArea = createTextResponse.textArea
padding: UIPadding = UIPadding(child=wrappedHelpTextArea, padding=(4, 4, 4, 4))
buttonBox: UIBoxLayout = self._createScrollButtonContainer()
hBox: UIBoxLayout = UIBoxLayout(vertical=False,
children=[
padding.with_border(width=2, color=color.WHITE).with_space_around(bottom=10, top=10),
buttonBox.with_space_around(left=15, top=20),
])
okButton: UITextureButton = self._createOkButton()
mainBox: UIBoxLayout = UIBoxLayout(vertical=True,
children=[
title.with_space_around(top=20),
hBox,
okButton
])
self._uiManager.add(
UIAnchorWidget(
anchor_x="center_x",
anchor_y="top",
child=mainBox)
)
def on_draw(self):
"""
Draw this view
"""
start_render()
self._uiManager.draw()
def _createLabel(self, text: str = '', height: int = 16, fontSize: int = 12) -> UILabel:
uiLabel: UILabel = UILabel(text=text, font_name=HelpView.FONT_NAME, height=height, font_size=fontSize, bold=True)
return uiLabel
def _createScrollButtonContainer(self) -> UIBoxLayout:
upButton: UITextureButton = self._createTextureButton(bareFileName='ArrowUp')
downButton: UITextureButton = self._createTextureButton(bareFileName='ArrowDown')
buttonBox: UIBoxLayout = UIBoxLayout(vertical=True,
children=[
upButton.with_space_around(top=20),
downButton.with_space_around(bottom=10, top=10)
])
@upButton.event('on_click')
def onClickUp(event: UIOnClickEvent):
self._onClickUp(event)
@downButton.event('on_click')
def onClickDown(event: UIOnClickEvent):
self._onClickDown(event)
return buttonBox
def _createHelpTextArea(self) -> CreateTextResponse:
"""
Creates and loads the help text
Returns: A named tuple that has the texture pane and the text area widgets
"""
fqFileName: str = LocateResources.getResourcesPath(resourcePackageName=LocateResources.RESOURCES_PACKAGE_NAME,
bareFileName='Help.txt')
with open(fqFileName) as fd:
lines: str = fd.read()
textArea: UITextArea = UITextArea(width=550, height=360,
text=lines,
text_color=color.BLACK,
font_name=HelpView.FONT_NAME)
textureFileName: str = LocateResources.getResourcesPath(resourcePackageName=LocateResources.IMAGE_RESOURCES_PACKAGE_NAME,
bareFileName='GreyPanel.png')
background: Texture = load_texture(textureFileName)
texturePane: UITexturePane = UITexturePane(
textArea.with_space_around(right=20),
tex=background,
padding=(10, 10, 10, 10)
)
return CreateTextResponse(textArea=textArea, texturePane=texturePane)
def _createTextureButton(self, bareFileName: str) -> UITextureButton:
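        # Derive the normal, pressed, and hovered texture file names from a single base name.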
normalFileName: str = f'{bareFileName}.png'
pressedFileName: str = f'{bareFileName}Pressed.png'
hoveredFileName: str = f'{bareFileName}Hovered.png'
fqNormalFileName: str = LocateResources.getResourcesPath(LocateResources.IMAGE_RESOURCES_PACKAGE_NAME, bareFileName=normalFileName)
fqPressedFileName: str = LocateResources.getResourcesPath(LocateResources.IMAGE_RESOURCES_PACKAGE_NAME, bareFileName=pressedFileName)
fqHoveredFileName: str = LocateResources.getResourcesPath(LocateResources.IMAGE_RESOURCES_PACKAGE_NAME, bareFileName=hoveredFileName)
normalTexture: Texture = load_texture(fqNormalFileName)
pressedTexture: Texture = load_texture(fqPressedFileName)
hoveredTexture: Texture = load_texture(fqHoveredFileName)
button: UITextureButton = UITextureButton(texture=normalTexture,
texture_pressed=pressedTexture,
texture_hovered=hoveredTexture,
width=32, height=32)
return button
def _createOkButton(self) -> UITextureButton:
buttonFileName: str = LocateResources.getResourcesPath(LocateResources.IMAGE_RESOURCES_PACKAGE_NAME, bareFileName='HelpOkButton.png')
pressedButtonFileName: str = LocateResources.getResourcesPath(LocateResources.IMAGE_RESOURCES_PACKAGE_NAME, bareFileName='HelpOkButtonPressed.png')
hoveredButtonFileName: str = LocateResources.getResourcesPath(LocateResources.IMAGE_RESOURCES_PACKAGE_NAME, bareFileName='HelpOkButtonHovered.png')
okButtonTexture: Texture = load_texture(buttonFileName)
okButtonPressedTexture: Texture = load_texture(pressedButtonFileName)
okButtonHoveredTexture: Texture = load_texture(hoveredButtonFileName)
buttonStyle: Dict = {'font_name': 'arial',
'font_size': 12
}
okButton: UITextureButton = UITextureButton(width=35, height=35,
texture=okButtonTexture,
texture_pressed=okButtonPressedTexture,
texture_hovered=okButtonHoveredTexture,
style=buttonStyle)
@okButton.event('on_click')
def onClickOk(event: UIOnClickEvent):
self._onClickOk(event)
return okButton
def _onClickUp(self, event: UIOnClickEvent):
self.__scrollHelp(event, -2)
def _onClickDown(self, event: UIOnClickEvent):
self.__scrollHelp(event, 2)
# noinspection PyUnusedLocal
def _onClickOk(self, event: UIOnClickEvent):
self._completeCallback()
def __scrollHelp(self, event: UIOnClickEvent, scroll_y: int):
"""
        This is my hack to do scrolling. I do not know how to post an event on arcade's
        UI event queue; not sure if that is possible at this point.
Only scroll in the vertical direction
Args:
event: Some UI event
scroll_y: How much to scroll; Negative numbers scroll up
"""
x = self._helpTextArea.center_x
y = self._helpTextArea.center_y
mouseEvent: UIMouseScrollEvent = UIMouseScrollEvent(source=event.source, scroll_y=scroll_y, scroll_x=0, x=x, y=y)
self._helpTextArea.on_event(mouseEvent)
| 41.109005
| 155
| 0.615979
|
ae76086dddd2919d2ca6f02ef18f78d6452ecdfe
| 1,556
|
py
|
Python
|
django_jwt_extended/apps.py
|
iml1111/django-jwt-extended
|
37e90aa1b830333b2bf66449e85d5747b4be74e1
|
[
"MIT"
] | null | null | null |
django_jwt_extended/apps.py
|
iml1111/django-jwt-extended
|
37e90aa1b830333b2bf66449e85d5747b4be74e1
|
[
"MIT"
] | null | null | null |
django_jwt_extended/apps.py
|
iml1111/django-jwt-extended
|
37e90aa1b830333b2bf66449e85d5747b4be74e1
|
[
"MIT"
] | 1
|
2022-01-24T16:02:39.000Z
|
2022-01-24T16:02:39.000Z
|
from datetime import timedelta
from django.apps import AppConfig
from django.conf import settings
from .config import ConfigParser
from .exceptions import NotFoundSecretKey
class DjangoJwtExtConfig(AppConfig):
name = 'django_jwt_extended'
verbose_name = "Django JWT Extended"
def ready(self):
if not hasattr(settings, 'SECRET_KEY'):
raise NotFoundSecretKey()
data = ConfigParser(
settings.JWT_CONFIG
if hasattr(settings, 'JWT_CONFIG')
else {}
)
self.jwt_algorithm = data.jwt_algorithm
self.token_location = data.token_location
self.access_token_expires = data.access_token_expires
self.refresh_token_expires = data.refresh_token_expires
self.token_header_name = 'Authorization'
# default error messages
self.jwt_not_found_msg = {
'msg': 'JWT token not found'
}
self.bearer_error_msg = {
'msg': (
f"Missing 'Bearer' type in "
f"'{self.token_header_name}' header."
f" Expected '{self.token_header_name}: "
f"Bearer <JWT>'"
)
}
self.decode_error_msg = {
'msg': 'Signature verification failed.'
}
self.expired_token_msg = {
'msg': 'JWT Token has expired'
}
self.invalid_token_type_msg = {
'msg': "Invalid JWT token type"
}
self.invalid_nbf_msg = {
'msg': "The token is not yet valid (nbf)"
}
| 31.12
| 63
| 0.591902
|
e68f89fd7c8856553178ee3871fffd0368b18b82
| 535
|
py
|
Python
|
PyObjCTest/test_nstextfieldcell.py
|
linuxfood/pyobjc-framework-Cocoa-test
|
3475890f165ab26a740f13d5afe4c62b4423a140
|
[
"MIT"
] | null | null | null |
PyObjCTest/test_nstextfieldcell.py
|
linuxfood/pyobjc-framework-Cocoa-test
|
3475890f165ab26a740f13d5afe4c62b4423a140
|
[
"MIT"
] | null | null | null |
PyObjCTest/test_nstextfieldcell.py
|
linuxfood/pyobjc-framework-Cocoa-test
|
3475890f165ab26a740f13d5afe4c62b4423a140
|
[
"MIT"
] | null | null | null |
import AppKit
from PyObjCTools.TestSupport import TestCase
class TestNSTextFieldCell(TestCase):
def testConstants(self):
self.assertEqual(AppKit.NSTextFieldSquareBezel, 0)
self.assertEqual(AppKit.NSTextFieldRoundedBezel, 1)
def testMethods(self):
self.assertResultIsBOOL(AppKit.NSTextFieldCell.drawsBackground)
self.assertArgIsBOOL(AppKit.NSTextFieldCell.setDrawsBackground_, 0)
self.assertArgIsBOOL(
AppKit.NSTextFieldCell.setWantsNotificationForMarkedText_, 0
)
| 33.4375
| 75
| 0.75514
|
722db66430085b3d10952c6adc667b856746e62f
| 1,182
|
py
|
Python
|
protocol_tests/conftest.py
|
dhh1128/aries-protocol-test-suite
|
7848cad6ccd27eb558e85865433ccb8bf6b645f7
|
[
"Apache-2.0"
] | 1
|
2019-12-10T11:36:02.000Z
|
2019-12-10T11:36:02.000Z
|
protocol_tests/conftest.py
|
dhh1128/aries-protocol-test-suite
|
7848cad6ccd27eb558e85865433ccb8bf6b645f7
|
[
"Apache-2.0"
] | null | null | null |
protocol_tests/conftest.py
|
dhh1128/aries-protocol-test-suite
|
7848cad6ccd27eb558e85865433ccb8bf6b645f7
|
[
"Apache-2.0"
] | null | null | null |
""" Test Suite fixture definitions.
These fixtures define the core functionality of the testing agent.
For more information on how pytest fixtures work, see
https://docs.pytest.org/en/latest/fixture.html#fixture
"""
import asyncio
import json
import os
import pytest
from agent_core.compat import create_task
from . import TestingAgent
# pylint: disable=redefined-outer-name
@pytest.fixture(scope='session')
def event_loop():
""" Create a session scoped event loop.
pytest.asyncio plugin provides a default function scoped event loop
which cannot be used as a dependency to session scoped fixtures.
"""
return asyncio.get_event_loop()
@pytest.fixture(scope='session')
def config(pytestconfig):
""" Get suite configuration.
"""
yield pytestconfig.suite_config
# TODO: Cleanup?
@pytest.fixture(scope='session')
async def agent(config):
""" The persistent agent used by the test suite to test other agents """
test_suite_agent = await TestingAgent.from_config_async(config)
task = create_task(test_suite_agent.start())
yield test_suite_agent
await test_suite_agent.shutdown()
task.cancel()
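# Illustrative sketch of consuming the fixtures above (hypothetical test, not part of the suite).
# It assumes the pytest-asyncio plugin mentioned in the event_loop docstring is installed.
#
#     @pytest.mark.asyncio
#     async def test_agent_started(agent, config):
#         # `agent` is the started TestingAgent yielded by the session-scoped fixture above.
#         assert agent is not None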
| 24.122449
| 76
| 0.73181
|
1df0754940dd0bf3418fd337eef74566a6539b3e
| 265
|
py
|
Python
|
cazipcode/pkg/superjson/__init__.py
|
MacHu-GWU/cazipcode-project
|
c10ae34629a2dee954a5cc0464baf0d997265af8
|
[
"MIT"
] | null | null | null |
cazipcode/pkg/superjson/__init__.py
|
MacHu-GWU/cazipcode-project
|
c10ae34629a2dee954a5cc0464baf0d997265af8
|
[
"MIT"
] | null | null | null |
cazipcode/pkg/superjson/__init__.py
|
MacHu-GWU/cazipcode-project
|
c10ae34629a2dee954a5cc0464baf0d997265af8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "0.0.4"
__short_description__ = "Extendable json encode/decode library."
__license__ = "MIT"
try:
from ._superjson import SuperJson, get_class_name, superjson as json
except Exception as e:
pass
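# Illustrative usage sketch (assumption: the `json` alias exposes a json-module-like
# dumps/loads interface; the names are inferred from the alias above, not verified here).
#
#     from cazipcode.pkg.superjson import json
#     text = json.dumps({"zipcode": "04469"})
#     data = json.loads(text)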
| 22.083333
| 72
| 0.716981
|
50ab12395325fe0a055dbe43747c68272d5614df
| 1,647
|
py
|
Python
|
setup.py
|
Jardo72/python-graph-alg-lib
|
15616bfab5d134186be1d37dd939dafabfd04b81
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Jardo72/python-graph-alg-lib
|
15616bfab5d134186be1d37dd939dafabfd04b81
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Jardo72/python-graph-alg-lib
|
15616bfab5d134186be1d37dd939dafabfd04b81
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020 Jaroslav Chmurny
#
# This file is part of Library of Graph Algorithms for Python.
#
# Library of Graph Algorithms for Python is free software developed for
# educational and experimental purposes. It is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import find_packages, setup
with open("README.md", "r") as readme_file:
long_description = readme_file.read()
setup(
name="python-graph-alg-lib",
version="1.0.0",
author="Jaroslav Chmurny",
author_email="jaroslav.chmurny@gmail.com",
description="Library of Graph Algorithms for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Jardo72/python-graph-alg-lib",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Typing :: Typed",
"Topic :: Software Development :: Libraries",
"Topic :: Education",
],
python_requires='>=3.8',
)
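# Illustrative build/install sketch (standard setuptools/pip usage, not project-specific docs):
#
#     python -m pip install .            # local install from the project root
#     python setup.py sdist bdist_wheel  # build source and wheel distributions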
| 36.6
| 76
| 0.69156
|
ffd02983010edd32cb1cf04576ed2eb24eb5ffc5
| 24,729
|
py
|
Python
|
apps/oozie/src/oozie/importlib/workflows.py
|
t3hi3x/hue
|
36d71c1a8dd978b899ef2dc3eef8887b68fd99a8
|
[
"Apache-2.0"
] | null | null | null |
apps/oozie/src/oozie/importlib/workflows.py
|
t3hi3x/hue
|
36d71c1a8dd978b899ef2dc3eef8887b68fd99a8
|
[
"Apache-2.0"
] | null | null | null |
apps/oozie/src/oozie/importlib/workflows.py
|
t3hi3x/hue
|
36d71c1a8dd978b899ef2dc3eef8887b68fd99a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import an external workflow by providing an XML definition.
The workflow definition is imported via the method 'import_workflow'.
The XML is first transformed into a django serialized string that can be deserialized and interpreted.
The interpreted objects are then assigned to the workflow, stripped of any useless IDs, and saved.
Then the links are interpreted from the original XML definition.
First the basic links are interpreted for basic hierarchy traversal.
Then the related links are inferred, including Decision node ends.
See oozie.models.Decision for more information on decision ends.
The XSLTs are partitioned by version.
For every new workflow DTD version a new directory should be created.
E.g. uri:oozie:workflow:0.4 => 0.4 directory in the xslt dir.
Action extensions are also versioned.
Every action extension will have its own version via /xslt/<workflow version>/extensions/<name of extensions>.<version>.xslt
"""
import json
import logging
from lxml import etree
import os
from django.core import serializers
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from desktop.models import Document
from oozie.conf import DEFINITION_XSLT_DIR
from oozie.models import Workflow, Node, Link, Start, End,\
Decision, DecisionEnd, Fork, Join,\
Kill
LOG = logging.getLogger(__name__)
OOZIE_NAMESPACES = ['uri:oozie:workflow:0.1', 'uri:oozie:workflow:0.2', 'uri:oozie:workflow:0.3', 'uri:oozie:workflow:0.4', 'uri:oozie:workflow:0.5']
LINKS = ('ok', 'error', 'path')
def _set_properties(workflow, root, namespace):
# root should be config element.
properties = []
seen = {}
namespaces = {
'n': namespace
}
for prop in root.xpath('n:property', namespaces=namespaces):
name = prop.xpath('n:name', namespaces=namespaces)[0].text
value = prop.xpath('n:value', namespaces=namespaces)[0].text
if name not in seen:
properties.append({'name': name, 'value': value})
seen[name] = True
workflow.job_properties = json.dumps(properties)
def _global_configuration(workflow, root, namespace):
# root should be global config element.
namespaces = {
'n': namespace
}
job_xml = root.xpath('n:job-xml', namespaces=namespaces)
configuration = root.xpath('n:configuration', namespaces=namespaces)
if job_xml:
workflow.job_xml = job_xml[0].text
if configuration:
_set_properties(workflow, configuration[0], namespace)
def _assign_workflow_properties(workflow, root, namespace):
namespaces = {
'n': namespace
}
global_config = root.xpath('n:global', namespaces=namespaces)
if global_config:
_global_configuration(workflow, global_config[0], namespace)
LOG.debug("Finished assigning properties to workflow %s" % smart_str(workflow.name))
def _save_links(workflow, root):
"""
Iterates over all links in the passed XML doc and creates links.
First non-META links are resolved and created, then META links.
Link name is chosen with the following logic:
If node is start, then use 'to'.
Else If node is Join, then use 'to'.
Else If node is Decision, then
If tag is 'default', then use 'default'
Else use 'start'
Else
If tag is 'path', use 'start'
Else use tag as name ('ok' or 'error')
This strategy has the following resolution:
- Fork and Decision nodes have Links named 'start'.
- Decision nodes have a 'default' link.
- Decision nodes may have a 'related' link that is their end.
- Fork nodes always have a 'related' link that is their end join node.
- Start and Join nodes have links named 'to'.
- All action nodes have 'ok' and 'error' links.
Note: The nodes that these links point to should exist already.
Note: Nodes are looked up by workflow and name.
Note: Unknown elements should throw an error.
"""
# Iterate over nodes
for child_el in root:
# Skip special nodes (like comments).
if not isinstance(child_el.tag, basestring):
continue
# Skip kill nodes.
if child_el.tag.endswith('kill'):
continue
# Skip global configuration.
if child_el.tag.endswith('global'):
continue
# Skip credentials configuration.
if child_el.tag.endswith('credentials'):
continue
tag = etree.QName(child_el).localname
name = child_el.attrib.get('name', tag)
LOG.debug("Getting node with data - XML TAG: %(tag)s\tLINK NAME: %(node_name)s\tWORKFLOW NAME: %(workflow_name)s" % {
'tag': smart_str(tag),
'node_name': smart_str(name),
'workflow_name': smart_str(workflow.name)
})
# Iterate over node members
# Join nodes have attributes which point to the next node
# Start node has attribute which points to first node
try:
parent = Node.objects.get(name=name, workflow=workflow).get_full_node()
except Node.DoesNotExist:
raise RuntimeError(_('Node with name %s for workflow %s does not exist.') % (name, workflow.name))
if isinstance(parent, Start):
_start_relationships(workflow, parent, child_el)
elif isinstance(parent, Join):
_join_relationships(workflow, parent, child_el)
elif isinstance(parent, Decision):
_decision_relationships(workflow, parent, child_el)
else:
_node_relationships(workflow, parent, child_el)
workflow.end = End.objects.get(workflow=workflow).get_full_node()
workflow.save()
_resolve_start_relationships(workflow)
_resolve_fork_relationships(workflow)
_resolve_decision_relationships(workflow)
LOG.debug("Finished resolving links for workflow %s" % smart_str(workflow.name))
def _start_relationships(workflow, parent, child_el):
"""
Resolve start node links.
Will always use 'to' link type.
"""
if 'to' not in child_el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
workflow.start = parent
to = child_el.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist:
raise RuntimeError(_("Node %s has not been defined.") % to)
try:
obj = Link.objects.filter(parent=parent).get(name='to')
obj.child = child
except Link.DoesNotExist:
obj = Link.objects.create(name='to', parent=parent, child=child)
obj.save()
def _join_relationships(workflow, parent, child_el):
"""
Resolves join node links.
Will always use 'to' link type.
"""
if 'to' not in child_el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
to = child_el.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist:
raise RuntimeError(_("Node %s has not been defined.") % to)
obj = Link.objects.create(name='to', parent=parent, child=child)
obj.save()
def _decision_relationships(workflow, parent, child_el):
"""
Resolves the switch statement like nature of decision nodes.
Will use 'to' link type, except for default case.
"""
for switch in child_el:
# Skip special nodes (like comments).
if not isinstance(switch.tag, basestring):
continue
for case in switch:
# Skip special nodes (like comments).
if not isinstance(case.tag, basestring):
continue
if 'to' not in case.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
to = case.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist:
raise RuntimeError(_("Node %s has not been defined.") % to)
if etree.QName(case).localname == 'default':
name = 'default'
obj = Link.objects.create(name=name, parent=parent, child=child)
else:
name = 'start'
comment = case.text.strip()
obj = Link.objects.create(name=name, parent=parent, child=child, comment=comment)
obj.save()
def _node_relationships(workflow, parent, child_el):
"""
Resolves node links.
Will use 'start' link type for fork nodes and 'to' link type for all other nodes.
Error links will automatically resolve to a single kill node.
"""
for el in child_el:
# Skip special nodes (like comments).
if not isinstance(el.tag, basestring):
continue
# Links
name = etree.QName(el).localname
if name in LINKS:
if name == 'path':
if 'start' not in el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'start' attribute.") % parent.name)
to = el.attrib['start']
name = 'start'
else:
if 'to' not in el.attrib:
raise RuntimeError(_("Node %s has a link that is missing 'to' attribute.") % parent.name)
to = el.attrib['to']
try:
child = Node.objects.get(workflow=workflow, name=to)
except Node.DoesNotExist:
if name == 'error':
child, create = Kill.objects.get_or_create(name='kill', workflow=workflow, node_type=Kill.node_type)
else:
raise RuntimeError(_("Node %s has not been defined") % to)
obj = Link.objects.create(name=name, parent=parent, child=child)
obj.save()
def _resolve_start_relationships(workflow):
if not workflow.start:
raise RuntimeError(_("Workflow start has not been created."))
if not workflow.end:
raise RuntimeError(_("Workflow end has not been created."))
obj = Link.objects.get_or_create(name='related', parent=workflow.start, child=workflow.end)
def _resolve_fork_relationships(workflow):
"""
Requires proper workflow structure.
Fork must come before a join.
"""
def helper(workflow, node, last_fork):
if isinstance(node, Fork):
join = None
children = node.get_children()
for child in children:
join = helper(workflow, child.get_full_node(), node) or join
link = Link(name='related', parent=node, child=join)
link.save()
node = join
elif isinstance(node, Join):
return node
join = None
children = node.get_children()
for child in children:
join = helper(workflow, child.get_full_node(), last_fork) or join
return join
helper(workflow, workflow.start.get_full_node(), None)
def _resolve_decision_relationships(workflow):
"""
Requires proper workflow structure.
Decision must come before any random ends.
DecisionEnd nodes are added to the end of the decision DAG.
Decision DAG ends are inferred by counting the parents of nodes that are node joins.
A 'related' link is created to associate the DecisionEnd to the Decision.
IE: D
D N
N N
N
equals
D
D N
N N
E
E
N
Performs a depth first search to understand branching.
"""
def insert_end(node, decision):
"""Insert DecisionEnd between node and node parents"""
parent_links = node.get_parent_links().exclude(name='default')
decision_end = decision.get_child_end()
# Find parent decision node for every end's parent.
# If the decision node is the one passed,
# change the parent to link to the Decision node's DecisionEnd node.
# Skip embedded decisions and forks along the way.
decision_end_used = False
for parent_link in parent_links:
parent = parent_link.parent.get_full_node()
node_temp = parent
while node_temp and not isinstance(node_temp, Decision):
if isinstance(node_temp, Join):
node_temp = node_temp.get_parent_fork().get_parent()
elif isinstance(node_temp, DecisionEnd):
node_temp = node_temp.get_parent_decision().get_parent()
else:
node_temp = node_temp.get_parent()
if node_temp.id == decision.id and parent.node_type != Decision.node_type:
links = Link.objects.filter(parent=parent).exclude(name__in=['related', 'kill', 'error'])
if len(links) != 1:
raise RuntimeError(_('Cannot import workflows that have decision DAG leaf nodes with multiple children or no children.'))
link = links[0]
link.child = decision_end
link.save()
decision_end_used = True
# Create link between DecisionEnd and terminal node.
if decision_end_used and not Link.objects.filter(name='to', parent=decision_end, child=node).exists():
link = Link(name='to', parent=decision_end, child=node)
link.save()
def decision_helper(decision, subgraphs):
"""
Iterates through children, waits for ends.
When an end is found, finish the decision.
If the end has more parents than the decision has branches, bubble the end upwards.
"""
# Create decision end if it does not exist.
if not Link.objects.filter(parent=decision, name='related').exists():
end = DecisionEnd(workflow=workflow, node_type=DecisionEnd.node_type)
end.save()
link = Link(name='related', parent=decision, child=end)
link.save()
children = [_link.child.get_full_node() for _link in decision.get_children_links().exclude(name__in=['error','default'])]
ends = set()
for child in children:
end = helper(child, subgraphs)
if end:
ends.add(end)
# A single end means that we've found a unique end for this decision.
# Multiple ends mean that we've found a bad decision.
if len(ends) > 1:
raise RuntimeError(_('Cannot import workflows that have decisions paths with multiple terminal nodes that converge on a single terminal node.'))
elif len(ends) == 1:
end = ends.pop()
# Branch count will vary with each call if we have multiple decision nodes embedded within decision paths.
# This is because parents are replaced with DecisionEnd nodes.
fan_in_count = len(end.get_parent_links().exclude(name__in=['error','default']))
# IF it covers all branches, then it is an end that perfectly matches this decision.
# ELSE it is an end for a decision path that the current decision node is a part of as well.
# The unhandled case is multiple ends for a single decision that converge on a single end.
# This is not handled in Hue.
fan_out_count = len(decision.get_children_links().exclude(name__in=['error','default']))
if fan_in_count > fan_out_count:
insert_end(end, decision)
return end
elif fan_in_count == fan_out_count:
insert_end(end, decision)
# End node is a decision node.
# This means that there are multiple decision nodes in sequence.
# If both decision nodes are within a single decision path,
# then the end may need to be returned, if found.
if isinstance(end, Decision):
end = decision_helper(end, subgraphs)
if end:
return end
# Can do this because we've replaced all its parents with a single DecisionEnd node.
return helper(end, subgraphs)
else:
raise RuntimeError(_('Cannot import workflows that have decisions paths with multiple terminal nodes that converge on a single terminal node.'))
else:
raise RuntimeError(_('Cannot import workflows that have decisions paths that never end.'))
return None
def helper(node, subgraphs={}):
"""Iterates through nodes, returning ends."""
if node.name in subgraphs:
return subgraphs[node.name]
# Assume receive full node.
children = [link.child.get_full_node() for link in node.get_children_links().exclude(name__in=['error','default'])]
# Multiple parents means that we've potentially found an end.
# Joins will always have more than one parent.
fan_in_count = len(node.get_parent_links().exclude(name__in=['error','default']))
if fan_in_count > 1 and not isinstance(node, Join) and not isinstance(node, DecisionEnd):
return node
elif isinstance(node, Decision):
end = decision_helper(node, subgraphs)
if end:
# Remember end so we don't have to go through checking this path again.
subgraphs[node.name] = end
return end
# In case of fork, should not find different ends.
elif len(children) > 1:
end = None
for child in children:
temp = helper(child, subgraphs)
end = end or temp
if end != temp:
raise RuntimeError(_('Different ends found in fork.'))
# Remember end so we don't have to go through checking this path again.
subgraphs[node.name] = end
return end
elif children:
return helper(children.pop(), subgraphs)
# Likely reached end.
return None
if Node.objects.filter(workflow=workflow).filter(node_type=Decision.node_type).exists():
helper(workflow.start.get_full_node())
def _prepare_nodes(workflow, root):
"""
Prepare nodes for grokking by Django
- Deserialize
- Automatically skip undefined nodes.
"""
objs = serializers.deserialize('xml', etree.tostring(root))
# First pass is a list of nodes and their types respectively.
# Must link up nodes with their respective full nodes.
node = None
nodes = []
for obj in objs:
obj.object.workflow = workflow
if type(obj.object) is Node:
node = obj.object
else:
node.node_type = obj.object.node_type
full_node = obj.object
for k, v in vars(node).items():
if not k.startswith('_') and k not in ('node_type','workflow','node_ptr_id'):
setattr(full_node, k, v)
full_node.workflow = workflow
full_node.node_type = type(full_node).node_type
full_node.node_ptr_id = None
full_node.id = None
nodes.append(full_node)
return nodes
def _preprocess_nodes(workflow, transformed_root, workflow_definition_root, nodes, fs=None):
"""
preprocess nodes
Sets credentials keys for actions.
Resolve start name and subworkflow dependencies.
Looks at path and interrogates all workflows until the proper deployment path is found.
If the proper deployment path is never found, the subworkflow is left unresolved and a debug message is logged.
"""
for action_el in workflow_definition_root:
if 'cred' in action_el.attrib:
for full_node in nodes:
if full_node.name == action_el.attrib['name']:
full_node.credentials = [{"name": cred, "value": True} for cred in action_el.attrib['cred'].split(',')]
for full_node in nodes:
if full_node.node_type == 'start':
full_node.name = 'start'
elif full_node.node_type == 'subworkflow':
app_path = None
for action_el in workflow_definition_root:
if 'name' in action_el.attrib and action_el.attrib['name'] == full_node.name:
for subworkflow_el in action_el:
if etree.QName(subworkflow_el).localname == 'sub-workflow':
for property_el in subworkflow_el:
if etree.QName(property_el).localname == 'app-path':
app_path = property_el.text
if app_path is None:
LOG.debug("Could not find deployment directory for subworkflow action %s" % full_node.name)
else:
LOG.debug("Found deployment directory for subworkflow action %s" % full_node.name)
subworkflow = _resolve_subworkflow_from_deployment_dir(fs, workflow, app_path)
if subworkflow:
LOG.debug("Found workflow %s in deployment directory %s" % (workflow, app_path))
full_node.sub_workflow = subworkflow
else:
LOG.debug("Could not find workflow with deployment directory: %s" % app_path)
def _resolve_subworkflow_from_deployment_dir(fs, workflow, app_path):
"""
Resolves subworkflow in a subworkflow node
Looks at path and interrogates all workflows until the proper deployment path is found.
If the proper deployment path is never found, None is returned.
"""
if not fs:
raise RuntimeError(_("No hadoop file system to operate on."))
if app_path.endswith('/'):
app_path = app_path[:-1]
if app_path.startswith('hdfs://'):
app_path = app_path[7:]
try:
f = fs.open('%s/workflow.xml' % app_path)
root = etree.parse(f).getroot()
f.close()
return Workflow.objects.get(name=root.attrib['name'], owner=workflow.owner, managed=True)
except IOError:
pass
except (KeyError, AttributeError) as e:
raise RuntimeError(_("Could not find workflow name when resolving subworkflow."))
except Workflow.DoesNotExist as e:
raise RuntimeError(_("Could not find workflow with name %s extracted from subworkflow path %s") % (root.attrib['name'], app_path))
except Exception as e:
raise RuntimeError(_("Could not find workflow at path %s: %s") % (app_path, e))
for subworkflow in Document.objects.available(Workflow, workflow.owner):
if subworkflow.deployment_dir == app_path:
return subworkflow
return None
def _save_nodes(workflow, nodes):
"""
Save nodes, but skip kill nodes because we create a single kill node to use.
"""
for node in nodes:
if node.node_type == 'kill':
continue
try:
# Do not overwrite start or end node
Node.objects.get(workflow=workflow, node_type=node.node_type, name=node.name)
except Node.DoesNotExist:
node.save()
def _process_metadata(workflow, metadata):
# Job attributes
attributes = metadata.setdefault('attributes', {})
workflow.description = attributes.setdefault('description', workflow.description)
workflow.deployment_dir = attributes.setdefault('deployment_dir', workflow.deployment_dir)
# Workflow node attributes
nodes = metadata.setdefault('nodes', {})
for node_name in nodes:
try:
node = Node.objects.get(name=node_name, workflow=workflow).get_full_node()
node_attributes = nodes[node_name].setdefault('attributes', {})
for node_attribute in node_attributes:
setattr(node, node_attribute, node_attributes[node_attribute])
node.save()
except Node.DoesNotExist:
# @TODO(abe): Log or throw error?
raise
except AttributeError:
# @TODO(abe): Log or throw error?
# Here there was an attribute reference in the metadata
# for this node that isn't a member of the node.
raise
def import_workflow_root(workflow, workflow_definition_root, metadata=None, fs=None):
try:
xslt_definition_fh = open("%(xslt_dir)s/workflow.xslt" % {
'xslt_dir': os.path.join(DEFINITION_XSLT_DIR.get(), 'workflows')
})
tag = etree.QName(workflow_definition_root.tag)
schema_version = tag.namespace
# Ensure namespace exists
if schema_version not in OOZIE_NAMESPACES:
raise RuntimeError(_("Tag with namespace %(namespace)s is not valid. Please use one of the following namespaces: %(namespaces)s") % {
'namespace': workflow_definition_root.tag,
'namespaces': ', '.join(OOZIE_NAMESPACES)
})
# Get XSLT
xslt = etree.parse(xslt_definition_fh)
xslt_definition_fh.close()
transform = etree.XSLT(xslt)
# Transform XML using XSLT
transformed_root = transform(workflow_definition_root)
# Resolve workflow dependencies and node types and link dependencies
nodes = _prepare_nodes(workflow, transformed_root)
_preprocess_nodes(workflow, transformed_root, workflow_definition_root, nodes, fs)
_save_nodes(workflow, nodes)
_save_links(workflow, workflow_definition_root)
_assign_workflow_properties(workflow, workflow_definition_root, schema_version)
if metadata:
_process_metadata(workflow, metadata)
# Update workflow attributes
workflow.schema_version = schema_version
workflow.name = workflow_definition_root.get('name')
workflow.save()
except:
workflow.delete(skip_trash=True)
raise
def import_workflow(workflow, workflow_definition, metadata=None, fs=None):
# Parse Workflow Definition
workflow_definition_root = etree.fromstring(workflow_definition)
if workflow_definition_root is None:
raise RuntimeError(_("Could not find any nodes in Workflow definition. Maybe it's malformed?"))
return import_workflow_root(workflow, workflow_definition_root, metadata, fs)
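# Illustrative call-site sketch (hypothetical; `workflow` is an existing oozie.models.Workflow
# instance and `fs` a Hadoop filesystem handle, neither is created here):
#
#     with open('workflow.xml') as fh:
#         import_workflow(workflow, fh.read(), metadata=None, fs=fs)
#
# On any failure, import_workflow_root() deletes the partially imported workflow and re-raises.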
| 35.943314
| 152
| 0.693882
|
4ddc9c7c9665e584b286792abecc49003d1cac1a
| 1,386
|
py
|
Python
|
nessus/permissions.py
|
tharvik/nessus
|
4551c319ac6cb3026ddb096a0f6f71f060a578ab
|
[
"CC0-1.0"
] | null | null | null |
nessus/permissions.py
|
tharvik/nessus
|
4551c319ac6cb3026ddb096a0f6f71f060a578ab
|
[
"CC0-1.0"
] | null | null | null |
nessus/permissions.py
|
tharvik/nessus
|
4551c319ac6cb3026ddb096a0f6f71f060a578ab
|
[
"CC0-1.0"
] | null | null | null |
from enum import Enum
from typing import Mapping, Union
from nessus.error import NessusError
from nessus.model import Object, lying_type
class NessusPermissionType(Enum):
default = 'default'
user = 'user'
group = 'group'
class NessusPermissionValueError(NessusError):
def __init__(self, value: int) -> None:
super().__init__()
self.value = value
class NessusPermission(Object):
"""
lies:
- `owner` could be None
- `permissions_id` could be None
"""
def __init__(self, owner: int, permission_type: str, permissions: int, permission_id: int, name: str) -> None:
if permissions not in (0, 16, 32, 64, 128):
raise NessusPermissionValueError(permissions)
self.owner = owner
self.type = permission_type
self.permissions = permissions
self.id = permission_id
self.name = name
@staticmethod
def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusPermission':
owner = lying_type(json_dict['owner'], int) # it's None actually
permission_type = str(json_dict['type'])
permissions = int(json_dict['permissions'])
permission_id = lying_type(json_dict['id'], int) # it's None actually
name = str(json_dict['name'])
return NessusPermission(owner, permission_type, permissions, permission_id, name)
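# Illustrative construction sketch (placeholder values, not real Nessus API output).
# The constructor above accepts only 0, 16, 32, 64 or 128 for `permissions` and raises
# NessusPermissionValueError otherwise.
def _example_permission() -> NessusPermission:  # hypothetical helper, not part of the original module
    return NessusPermission(owner=1, permission_type='default', permissions=16,
                            permission_id=7, name='example')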
| 29.489362
| 114
| 0.660895
|
381ad4194d34ead3158135cd01b5540f35b0ce2a
| 3,234
|
py
|
Python
|
signalsample.py
|
Topstack-defi/bot-binance-volatility-trading
|
b5d439f326d7a35c5320de20744243050504181e
|
[
"MIT"
] | 4
|
2021-11-10T11:47:39.000Z
|
2022-02-03T07:07:37.000Z
|
signalsample.py
|
Topstack-defi/bot-binance-volatility-trading
|
b5d439f326d7a35c5320de20744243050504181e
|
[
"MIT"
] | 1
|
2021-11-10T03:52:25.000Z
|
2021-11-11T07:00:18.000Z
|
signalsample.py
|
Topstack-defi/bot-binance-volatility-trading
|
b5d439f326d7a35c5320de20744243050504181e
|
[
"MIT"
] | 2
|
2021-11-18T17:19:46.000Z
|
2021-11-20T02:47:06.000Z
|
from tradingview_ta import TA_Handler, Interval, Exchange
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for directory handling
import glob
import time
MY_EXCHANGE = 'BINANCE'
MY_SCREENER = 'CRYPTO'
MY_FIRST_INTERVAL = Interval.INTERVAL_1_MINUTE
MY_SECOND_INTERVAL = Interval.INTERVAL_5_MINUTES
TA_BUY_THRESHOLD = 18 # How many of the 26 indicators to indicate a buy
PAIR_WITH = 'USDT'
TICKERS = 'signalsample.txt'
TIME_TO_WAIT = 4 # Minutes to wait between analysis
FULL_LOG = False # List analysis results to console
def analyze(pairs):
taMax = 0
taMaxCoin = 'none'
signal_coins = {}
first_analysis = {}
second_analysis = {}
first_handler = {}
second_handler = {}
if os.path.exists('signals/signalsample.exs'):
os.remove('signals/signalsample.exs')
for pair in pairs:
first_handler[pair] = TA_Handler(
symbol=pair,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_FIRST_INTERVAL,
timeout= 10
)
second_handler[pair] = TA_Handler(
symbol=pair,
exchange=MY_EXCHANGE,
screener=MY_SCREENER,
interval=MY_SECOND_INTERVAL,
timeout= 10
)
for pair in pairs:
try:
first_analysis = first_handler[pair].get_analysis()
second_analysis = second_handler[pair].get_analysis()
except Exception as e:
print("Exeption:")
print(e)
print (f'Coin: {pair}')
print (f'First handler: {first_handler[pair]}')
print (f'Second handler: {second_handler[pair]}')
tacheckS = 0
first_tacheck = first_analysis.summary['BUY']
second_tacheck = second_analysis.summary['BUY']
if FULL_LOG:
print(f'{pair} First {first_tacheck} Second {second_tacheck}')
else:
print(".", end = '')
if first_tacheck > taMax:
taMax = first_tacheck
taMaxCoin = pair
if first_tacheck >= TA_BUY_THRESHOLD and second_tacheck >= TA_BUY_THRESHOLD:
signal_coins[pair] = pair
print("")
print(f'Signal detected on {pair}')
with open('signals/signalsample.exs','a+') as f:
f.write(pair + '\n')
print("")
print(f'Max signal by {taMaxCoin} at {taMax} on shortest timeframe')
return signal_coins
if __name__ == '__main__':
signal_coins = {}
pairs = {}
pairs = [line.strip() + PAIR_WITH for line in open(TICKERS)]
while True:
print(f'Analyzing {len(pairs)} coins')
signal_coins = analyze(pairs)
if len(signal_coins) == 0:
print(f'No coins above {TA_BUY_THRESHOLD} threshold')
else:
print(f'{len(signal_coins)} coins above {TA_BUY_THRESHOLD} threshold on both timeframes')
print(f'Waiting {TIME_TO_WAIT} minutes for next analysis')
time.sleep((TIME_TO_WAIT*60))
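# Illustrative TICKERS file sketch: the list comprehension above appends PAIR_WITH to each
# stripped line, so signalsample.txt is expected to hold one base symbol per line, e.g.
#
#     BTC
#     ETH
#     BNB
#
# which the script expands to BTCUSDT, ETHUSDT and BNBUSDT before analysis.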
| 33
| 100
| 0.599258
|
f2538afae18e53515dabd55a22312633f849e54b
| 22,156
|
py
|
Python
|
netpyne/sim/load.py
|
urdapile/netpyne
|
c87bcda9ca13e37af4e572f892affe24f09699cf
|
[
"MIT"
] | null | null | null |
netpyne/sim/load.py
|
urdapile/netpyne
|
c87bcda9ca13e37af4e572f892affe24f09699cf
|
[
"MIT"
] | null | null | null |
netpyne/sim/load.py
|
urdapile/netpyne
|
c87bcda9ca13e37af4e572f892affe24f09699cf
|
[
"MIT"
] | null | null | null |
"""
Module for loading of data and simulations
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import range
# required to make json saving work in Python 2/3
try:
to_unicode = unicode
except NameError:
to_unicode = str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
import sys
from collections import OrderedDict
from ..specs import Dict, ODict
from .. import specs
from . import utils
from . import setup
#------------------------------------------------------------------------------
# Load data from file
#------------------------------------------------------------------------------
def _loadFile(filename):
from .. import sim
import os
def _byteify(data, ignore_dicts = False):
# if this is a unicode string, return its string representation
if isinstance(data, basestring):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [ _byteify(item, ignore_dicts=True) for item in data ]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return OrderedDict({
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.items()
})
# if it's anything else, return it in its original form
return data
if hasattr(sim, 'cfg') and sim.cfg.timing: sim.timing('start', 'loadFileTime')
ext = os.path.basename(filename).split('.')[-1]
# load pickle file
if ext == 'pkl':
import pickle
print(('Loading file %s ... ' % (filename)))
with open(filename, 'rb') as fileObj:
if sys.version_info[0] == 2:
data = pickle.load(fileObj)
else:
data = pickle.load(fileObj, encoding='latin1')
# load dpk file
elif ext == 'dpk':
import gzip
print(('Loading file %s ... ' % (filename)))
#fn=sim.cfg.filename #.split('.')
#gzip.open(fn, 'wb').write(pk.dumps(dataSave)) # write compressed string
print('NOT IMPLEMENTED!')
# load json file
elif ext == 'json':
import json
print(('Loading file %s ... ' % (filename)))
with open(filename, 'r') as fileObj:
data = json.load(fileObj) # works with py2 and py3
# load mat file
elif ext == 'mat':
from scipy.io import loadmat
print(('Loading file %s ... ' % (filename)))
dataraw = loadmat(filename, struct_as_record=False, squeeze_me=True)
data = utils._mat2dict(dataraw)
#savemat(sim.cfg.filename+'.mat', replaceNoneObj(dataSave)) # replace None and {} with [] so can save in .mat format
print('Finished loading!')
# load HDF5 file (uses very inefficient hdf5storage module which supports dicts)
elif ext == 'saveHDF5':
#dataSaveUTF8 = _dict2utf8(replaceNoneObj(dataSave)) # replace None and {} with [], and convert to utf
import hdf5storage
print(('Loading file %s ... ' % (filename)))
#hdf5storage.writes(dataSaveUTF8, filename=sim.cfg.filename+'.hdf5')
print('NOT IMPLEMENTED!')
# load CSV file (currently only saves spikes)
elif ext == 'csv':
import csv
print(('Loading file %s ... ' % (filename)))
writer = csv.writer(open(sim.cfg.filename+'.csv', 'wb'))
#for dic in dataSave['simData']:
# for values in dic:
# writer.writerow(values)
print('NOT IMPLEMENTED!')
# load Dat file(s)
elif ext == 'dat':
print(('Loading file %s ... ' % (filename)))
print('NOT IMPLEMENTED!')
# traces = sim.cfg.recordTraces
# for ref in traces.keys():
# for cellid in sim.allSimData[ref].keys():
# dat_file_name = '%s_%s.dat'%(ref,cellid)
# dat_file = open(dat_file_name, 'w')
# trace = sim.allSimData[ref][cellid]
# print("Saving %i points of data on: %s:%s to %s"%(len(trace),ref,cellid,dat_file_name))
# for i in range(len(trace)):
# dat_file.write('%s\t%s\n'%((i*sim.cfg.dt/1000),trace[i]/1000))
else:
print(('Format not recognized for file %s'%(filename)))
return
if hasattr(sim, 'rank') and sim.rank == 0 and hasattr(sim, 'cfg') and sim.cfg.timing:
sim.timing('stop', 'loadFileTime')
print((' Done; file loading time = %0.2f s' % sim.timingData['loadFileTime']))
return data
#------------------------------------------------------------------------------
# Load simulation config from file
#------------------------------------------------------------------------------
def loadSimCfg(filename, data=None, setLoaded=True):
"""
Function for/to <short description of `netpyne.sim.load.loadSimCfg`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
data : <``None``?>
<Short description of data>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
setLoaded : bool
<Short description of setLoaded>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
"""
if not data:
data = _loadFile(filename)
print('Loading simConfig...')
if 'simConfig' in data:
if setLoaded:
setup.setSimCfg(data['simConfig'])
else:
return specs.SimConfig(data['simConfig'])
else:
print((' simConfig not found in file %s'%(filename)))
pass
#------------------------------------------------------------------------------
# Load netParams from cell
#------------------------------------------------------------------------------
def loadNetParams(filename, data=None, setLoaded=True):
"""
Function for/to <short description of `netpyne.sim.load.loadNetParams`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
data : <``None``?>
<Short description of data>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
setLoaded : bool
<Short description of setLoaded>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
"""
if not data: data = _loadFile(filename)
print('Loading netParams...')
if 'net' in data and 'params' in data['net']:
if setLoaded:
setup.setNetParams(data['net']['params'])
else:
return specs.NetParams(data['net']['params'])
else:
print(('netParams not found in file %s'%(filename)))
pass
#------------------------------------------------------------------------------
# Load cells and pops from file and create NEURON objs
#------------------------------------------------------------------------------
def loadNet(filename, data=None, instantiate=True, compactConnFormat=False):
"""
Function for/to <short description of `netpyne.sim.load.loadNet`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
data : <``None``?>
<Short description of data>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
instantiate : bool
<Short description of instantiate>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
compactConnFormat : bool
<Short description of compactConnFormat>
**Default:** ``False``
**Options:** ``<option>`` <description of option>
"""
from .. import sim
if not data: data = _loadFile(filename)
if not hasattr(sim, 'net'): sim.initialize()
if 'net' in data and 'cells' in data['net'] and 'pops' in data['net']:
loadNow = True
if hasattr(sim, 'rank'):
if sim.rank != 0:
loadNow = False
if loadNow:
sim.timing('start', 'loadNetTime')
print('Loading net...')
if compactConnFormat:
compactToLongConnFormat(data['net']['cells'], compactConnFormat) # convert loaded data to long format
sim.net.allPops = data['net']['pops']
sim.net.allCells = data['net']['cells']
if instantiate:
try:
# calculate cells to instantiate in this node
if hasattr(sim, 'rank'):
if isinstance(instantiate, list):
cellsNode = [data['net']['cells'][i] for i in range(int(sim.rank), len(data['net']['cells']), sim.nhosts) if i in instantiate]
else:
cellsNode = [data['net']['cells'][i] for i in range(int(sim.rank), len(data['net']['cells']), sim.nhosts)]
else:
if isinstance(instantiate, list):
cellsNode = [data['net']['cells'][i] for i in range(0, len(data['net']['cells']), 1) if i in instantiate]
else:
cellsNode = [data['net']['cells'][i] for i in range(0, len(data['net']['cells']), 1)]
except:
print('Unable to instantiate network...')
try:
if sim.cfg.createPyStruct:
for popLoadLabel, popLoad in data['net']['pops'].items():
pop = sim.Pop(popLoadLabel, popLoad['tags'])
pop.cellGids = popLoad['cellGids']
sim.net.pops[popLoadLabel] = pop
for cellLoad in cellsNode:
# create new CompartCell object and add attributes, but don't create sections or associate gid yet
# TO DO: assumes CompartCell -- add condition to load PointCell
cell = sim.CompartCell(gid=cellLoad['gid'], tags=cellLoad['tags'], create=False, associateGid=False)
try:
if sim.cfg.saveCellSecs:
cell.secs = Dict(cellLoad['secs'])
else:
createNEURONObjorig = sim.cfg.createNEURONObj
sim.cfg.createNEURONObj = False # avoid creating NEURON Objs now; just need py struct
cell.create()
sim.cfg.createNEURONObj = createNEURONObjorig
except:
if sim.cfg.verbose: print(' Unable to load cell secs')
try:
cell.conns = [Dict(conn) for conn in cellLoad['conns']]
except:
if sim.cfg.verbose: print(' Unable to load cell conns')
try:
cell.stims = [Dict(stim) for stim in cellLoad['stims']]
except:
if sim.cfg.verbose: print(' Unable to load cell stims')
sim.net.cells.append(cell)
print((' Created %d cells' % (len(sim.net.cells))))
print((' Created %d connections' % (sum([len(c.conns) for c in sim.net.cells]))))
print((' Created %d stims' % (sum([len(c.stims) for c in sim.net.cells]))))
except:
print('Unable to create Python structure...')
try:
# only create NEURON objs, if there is a Python struct (fix so minimal Python struct is created)
if sim.cfg.createNEURONObj:
if sim.cfg.verbose: print(" Adding NEURON objects...")
# create NEURON sections, mechs, syns, etc; and associate gid
for cell in sim.net.cells:
prop = {'secs': cell.secs}
cell.createNEURONObj(prop) # use same syntax as when creating based on high-level specs
cell.associateGid() # can only associate once the hSection obj has been created
# create all NEURON Netcons, NetStims, etc
sim.pc.barrier()
for cell in sim.net.cells:
try:
cell.addStimsNEURONObj() # add stims first so can then create conns between netstims
cell.addConnsNEURONObj()
except:
if sim.cfg.verbose: print(' Unable to instantiate cell conns or stims')
print((' Added NEURON objects to %d cells' % (len(sim.net.cells))))
except:
print('Unable to create NEURON objects...')
if loadNow and sim.cfg.timing: #if sim.rank == 0 and sim.cfg.timing:
sim.timing('stop', 'loadNetTime')
print((' Done; re-instantiate net time = %0.2f s' % sim.timingData['loadNetTime']))
else:
print((' netCells and/or netPops not found in file %s'%(filename)))
#------------------------------------------------------------------------------
# Load netParams from cell
#------------------------------------------------------------------------------
def loadSimData(filename, data=None):
"""
Function for/to <short description of `netpyne.sim.load.loadSimData`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
data : <``None``?>
<Short description of data>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
"""
from .. import sim
if not data: data = _loadFile(filename)
print('Loading simData...')
if 'simData' in data:
sim.allSimData = data['simData']
else:
print((' simData not found in file %s'%(filename)))
pass
#------------------------------------------------------------------------------
# Load all data in file
#------------------------------------------------------------------------------
def loadAll(filename, data=None, instantiate=True, createNEURONObj=True):
"""
Function for/to <short description of `netpyne.sim.load.loadAll`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
data : <``None``?>
<Short description of data>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
instantiate : bool
<Short description of instantiate>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
createNEURONObj : bool
<Short description of createNEURONObj>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
"""
from .. import sim
if not data: data = _loadFile(filename)
loadSimCfg(filename, data=data)
sim.cfg.createNEURONObj = createNEURONObj # set based on argument
loadNetParams(filename, data=data)
if hasattr(sim.cfg, 'compactConnFormat'):
connFormat = sim.cfg.compactConnFormat
else:
print('Error: no connFormat provided in simConfig')
sys.exit()
loadNet(filename, data=data, instantiate=instantiate, compactConnFormat=connFormat)
loadSimData(filename, data=data)
#------------------------------------------------------------------------------
# Convert compact (list-based) to long (dict-based) conn format
#------------------------------------------------------------------------------
def compactToLongConnFormat(cells, connFormat):
"""
Function for/to <short description of `netpyne.sim.load.compactToLongConnFormat`>
Parameters
----------
cells : <type>
<Short description of cells>
**Default:** *required*
connFormat : <type>
<Short description of connFormat>
**Default:** *required*
"""
formatIndices = {key: connFormat.index(key) for key in connFormat}
try:
for cell in cells:
for iconn, conn in enumerate(cell['conns']):
cell['conns'][iconn] = {key: conn[index] for key,index in formatIndices.items()}
return cells
except:
print("Error converting conns from compact to long format")
return cells
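# Illustrative sketch of the conversion above; the connFormat key names are hypothetical
# and only chosen to show the positional mapping (not netpyne's actual conn keys).
def _example_compact_to_long():  # hypothetical helper, not part of the original module
    cells = [{'conns': [[42, 0.5]]}]
    # with connFormat ['preGid', 'weight'], conn [42, 0.5] becomes {'preGid': 42, 'weight': 0.5}
    return compactToLongConnFormat(cells, ['preGid', 'weight'])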
#------------------------------------------------------------------------------
# load HDF5 (conns for now)
#------------------------------------------------------------------------------
def loadHDF5(filename):
"""
Function for/to <short description of `netpyne.sim.load.loadHDF5`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
"""
from .. import sim
import h5py
if sim.rank == 0: sim.timing('start', 'loadTimeHDF5')
connsh5 = h5py.File(filename, 'r')
conns = [list(x) for x in connsh5['conns']]
connsFormat = list(connsh5['connsFormat'])
if sim.rank == 0: sim.timing('stop', 'loadTimeHDF5')
return conns, connsFormat
#------------------------------------------------------------------------------
# Load cell tags and conns using ijson (faster!)
#------------------------------------------------------------------------------
def ijsonLoad(filename, tagsGidRange=None, connsGidRange=None, loadTags=True, loadConns=True, tagFormat=None, connFormat=None, saveTags=None, saveConns=None):
"""
Function for/to <short description of `netpyne.sim.load.ijsonLoad`>
Parameters
----------
filename : <type>
<Short description of filename>
**Default:** *required*
tagsGidRange : <``None``?>
<Short description of tagsGidRange>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
connsGidRange : <``None``?>
<Short description of connsGidRange>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
loadTags : bool
<Short description of loadTags>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
loadConns : bool
<Short description of loadConns>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
tagFormat : <``None``?>
<Short description of tagFormat>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
connFormat : <``None``?>
<Short description of connFormat>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
saveTags : <``None``?>
<Short description of saveTags>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
saveConns : <``None``?>
<Short description of saveConns>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
"""
# requires: 1) pip install ijson, 2) brew install yajl
from .. import sim
import ijson.backends.yajl2_cffi as ijson
import json
from time import time
tags, conns = {}, {}
if connFormat:
conns['format'] = connFormat
if tagFormat:
tags['format'] = tagFormat
with open(filename, 'rb') as fd:
start = time()
print('Loading data ...')
objs = ijson.items(fd, 'net.cells.item')
if loadTags and loadConns:
print('Storing tags and conns ...')
for cell in objs:
if tagsGidRange==None or cell['gid'] in tagsGidRange:
print('Cell gid: %d'%(cell['gid']))
if tagFormat:
tags[int(cell['gid'])] = [cell['tags'][param] for param in tagFormat]
else:
tags[int(cell['gid'])] = cell['tags']
if connsGidRange==None or cell['gid'] in connsGidRange:
if connFormat:
conns[int(cell['gid'])] = [[conn[param] for param in connFormat] for conn in cell['conns']]
else:
conns[int(cell['gid'])] = cell['conns']
elif loadTags:
print('Storing tags ...')
if tagFormat:
tags.update({int(cell['gid']): [cell['tags'][param] for param in tagFormat] for cell in objs if tagsGidRange==None or cell['gid'] in tagsGidRange})
else:
tags.update({int(cell['gid']): cell['tags'] for cell in objs if tagsGidRange==None or cell['gid'] in tagsGidRange})
elif loadConns:
print('Storing conns...')
if connFormat:
conns.update({int(cell['gid']): [[conn[param] for param in connFormat] for conn in cell['conns']] for cell in objs if connsGidRange==None or cell['gid'] in connsGidRange})
else:
conns.update({int(cell['gid']): cell['conns'] for cell in objs if connsGidRange==None or cell['gid'] in connsGidRange})
print('time elapsed (s): ', time() - start)
tags = utils.decimalToFloat(tags)
conns = utils.decimalToFloat(conns)
if saveTags and tags:
outFilename = saveTags if isinstance(saveTags, basestring) else filename[:-4]+'_tags.json'
print('Saving tags to %s ...' % (outFilename))
sim.saveJSON(outFilename, {'tags': tags})
if saveConns and conns:
outFilename = saveConns if isinstance(saveConns, basestring) else filename[:-4]+'_conns.json'
print('Saving conns to %s ...' % (outFilename))
sim.saveJSON(outFilename, {'conns': conns})
return tags, conns
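# Illustrative round-trip sketch (assumes these loaders are re-exported on netpyne.sim, as
# elsewhere in the package; the file name and flags are placeholders):
#
#     from netpyne import sim
#     sim.loadAll('model_output.json', instantiate=True, createNEURONObj=True)
#     # or load the pieces without applying them to the current simulation:
#     cfg = sim.loadSimCfg('model_output.json', setLoaded=False)
#     net_params = sim.loadNetParams('model_output.json', setLoaded=False)
#
# Note that loadAll() exits if the saved simConfig lacks compactConnFormat (see above).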
| 36.261866
| 187
| 0.523154
|
ed02cac010039e9e725a076a45c86316363a8106
| 22,660
|
py
|
Python
|
tests/datasource/data_connector/test_runtime_data_connector.py
|
rpanai/great_expectations
|
82c686088c0652a1b2e8e5eb95b5851efed32551
|
[
"Apache-2.0"
] | 1
|
2021-07-07T00:22:09.000Z
|
2021-07-07T00:22:09.000Z
|
tests/datasource/data_connector/test_runtime_data_connector.py
|
rpanai/great_expectations
|
82c686088c0652a1b2e8e5eb95b5851efed32551
|
[
"Apache-2.0"
] | null | null | null |
tests/datasource/data_connector/test_runtime_data_connector.py
|
rpanai/great_expectations
|
82c686088c0652a1b2e8e5eb95b5851efed32551
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import pandas as pd
import pytest
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
BatchDefinition,
BatchRequest,
BatchSpec,
RuntimeBatchRequest,
)
from great_expectations.core.batch_spec import (
PathBatchSpec,
RuntimeDataBatchSpec,
RuntimeQueryBatchSpec,
S3BatchSpec,
)
from great_expectations.core.id_dict import IDDict
from great_expectations.datasource.data_connector import RuntimeDataConnector
yaml = YAML()
def test_self_check(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
assert test_runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"example_data_asset_names": [],
"data_assets": {},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_error_checking(basic_datasource):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Test for an unknown datasource
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name="non_existent_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df},
)
)
# Test for an unknown data_connector
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource.name,
data_connector_name="non_existent_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df},
)
)
# test for missing runtime_parameters arg
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_identifiers={"pipeline_stage_name": "munge"},
)
)
# test for too many runtime_parameters keys
with pytest.raises(ge_exceptions.InvalidBatchRequestError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df, "path": "my_path"},
batch_identifiers={"pipeline_stage_name": "munge"},
)
)
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present(
basic_datasource,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers: dict
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert len(batch_definition_list) == 1
def test_batch_identifiers_and_batch_identifiers_error_illegal_keys(
basic_datasource,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers: dict
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
"custom_key_1": "custom_value_1",
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Ensure that keys in batch_identifiers["batch_identifiers"] that are not among the batch_identifiers
# declared in the configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_name",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: BatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
batch_identifiers = {"batch_identifiers": {"unknown_key": "some_value"}}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Ensure that keys in batch_identifiers["batch_identifiers"] that are not among the batch_identifiers
# declared in the configuration are not accepted. In this test, a single illegal key is present.
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: BatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
def test_get_available_data_asset_names(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
expected_available_data_asset_names: List[str] = []
available_data_asset_names: List[
str
] = test_runtime_data_connector.get_available_data_asset_names()
assert available_data_asset_names == expected_available_data_asset_names
def test_get_available_data_asset_names_updating_after_batch_request(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# empty if data_connector has not been used
assert test_runtime_data_connector.get_available_data_asset_names() == []
batch_identifiers = {
"airflow_run_id": 1234567890,
}
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_1",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# run with my_data_asset_1
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
# updated to my_data_asset_1
assert test_runtime_data_connector.get_available_data_asset_names() == [
"my_data_asset_1"
]
batch_identifiers = {
"airflow_run_id": 1234567890,
}
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_2",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# run with my_data_asset_2
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
# updated to my_data_asset_1 and my_data_asset_2
assert test_runtime_data_connector.get_available_data_asset_names() == [
"my_data_asset_1",
"my_data_asset_2",
]
def test_data_references_cache_updating_after_batch_request(
basic_datasource,
):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# empty if data_connector has not been used
assert test_runtime_data_connector.get_available_data_asset_names() == []
batch_identifiers = {
"airflow_run_id": 1234567890,
}
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_1",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# run with my_data_asset_1
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert test_runtime_data_connector._data_references_cache == {
"my_data_asset_1": {
"1234567890": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_1",
batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
)
],
}
}
    # update the cache with a new batch (new airflow_run_id) for the same data asset
test_df_new: pd.DataFrame = pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]})
batch_identifiers = {
"airflow_run_id": 987654321,
}
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_1",
"runtime_parameters": {
"batch_data": test_df_new,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # run with the same data asset (my_data_asset_1) but a new batch
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert test_runtime_data_connector._data_references_cache == {
"my_data_asset_1": {
"1234567890": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_1",
batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
)
],
"987654321": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_1",
batch_identifiers=IDDict({"airflow_run_id": 987654321}),
)
],
},
}
# new data_asset_name
test_df_new_asset: pd.DataFrame = pd.DataFrame(
data={"col1": [9, 10], "col2": [11, 12]}
)
batch_identifiers = {
"airflow_run_id": 5555555,
}
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_2",
"runtime_parameters": {
"batch_data": test_df_new_asset,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
    # run with a new data asset (my_data_asset_2)
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert test_runtime_data_connector._data_references_cache == {
"my_data_asset_1": {
"1234567890": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_1",
batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
)
],
"987654321": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_1",
batch_identifiers=IDDict({"airflow_run_id": 987654321}),
)
],
},
"my_data_asset_2": {
"5555555": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_2",
batch_identifiers=IDDict({"airflow_run_id": 5555555}),
)
]
},
}
assert test_runtime_data_connector.get_available_data_asset_names() == [
"my_data_asset_1",
"my_data_asset_2",
]
assert test_runtime_data_connector.get_data_reference_list_count() == 3
def test_get_batch_definition_list_from_batch_request_length_one(
basic_datasource,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers: dict = {
"airflow_run_id": 1234567890,
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
expected_batch_definition_list: List[BatchDefinition] = [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_identifiers=IDDict(batch_identifiers),
)
]
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert batch_definition_list == expected_batch_definition_list
def test_get_batch_definition_list_from_batch_request_with_and_without_data_asset_name(
basic_datasource,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers = {
"airflow_run_id": 1234567890,
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# data_asset_name is missing
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
with pytest.raises(TypeError):
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# test that name can be set as "my_data_asset"
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert len(batch_definition_list) == 1
    # check that the provided data_asset_name has been set
assert batch_definition_list[0]["data_asset_name"] == "my_data_asset"
def test__get_data_reference_list(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
expected_data_reference_list: List[str] = []
# noinspection PyProtectedMember
data_reference_list: List[
str
] = test_runtime_data_connector._get_data_reference_list()
assert data_reference_list == expected_data_reference_list
def test_refresh_data_references_cache(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
assert len(test_runtime_data_connector._data_references_cache) == 0
def test__generate_batch_spec_parameters_from_batch_definition(
basic_datasource,
):
batch_identifiers = {
"custom_key_0": "staging",
"airflow_run_id": 1234567890,
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
expected_batch_spec_parameters: dict = {"data_asset_name": "my_data_asset"}
# noinspection PyProtectedMember
batch_spec_parameters: dict = test_runtime_data_connector._generate_batch_spec_parameters_from_batch_definition(
batch_definition=BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_identifiers=IDDict(batch_identifiers),
)
)
assert batch_spec_parameters == expected_batch_spec_parameters
def test__build_batch_spec(basic_datasource):
batch_identifiers = {
"custom_key_0": "staging",
"airflow_run_id": 1234567890,
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
batch_definition = BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_identifiers=IDDict(batch_identifiers),
)
batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
batch_definition=batch_definition,
runtime_parameters={
"batch_data": pd.DataFrame({"x": range(10)}),
},
)
assert type(batch_spec) == RuntimeDataBatchSpec
assert set(batch_spec.keys()) == {"batch_data", "data_asset_name"}
assert batch_spec["batch_data"].shape == (10, 1)
batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
batch_definition=batch_definition,
runtime_parameters={
"query": "my_query",
},
)
assert type(batch_spec) == RuntimeQueryBatchSpec
batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
batch_definition=batch_definition, runtime_parameters={"path": "my_path"}
)
assert type(batch_spec) == PathBatchSpec
batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
batch_definition=batch_definition,
runtime_parameters={"path": "s3://my.s3.path"},
)
assert type(batch_spec) == S3BatchSpec
batch_spec: BatchSpec = test_runtime_data_connector.build_batch_spec(
batch_definition=batch_definition,
runtime_parameters={"path": "s3a://my.s3.path"},
)
assert type(batch_spec) == S3BatchSpec
def test__get_data_reference_name(basic_datasource):
data_connector_query: dict = {
"batch_filter_parameters": {
"airflow_run_id": 1234567890,
}
}
batch_identifiers = IDDict(data_connector_query["batch_filter_parameters"])
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
assert (
test_runtime_data_connector._get_data_reference_name(batch_identifiers)
== "1234567890"
)
data_connector_query: dict = {
"batch_filter_parameters": {
"run_id_1": 1234567890,
"run_id_2": 1111111111,
}
}
batch_identifiers = IDDict(data_connector_query["batch_filter_parameters"])
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
assert (
test_runtime_data_connector._get_data_reference_name(batch_identifiers)
== "1234567890-1111111111"
)
| 34.969136
| 116
| 0.684598
|
29f8366d35be6e679f25fbe746bbccc52162fdc4
| 3,036
|
py
|
Python
|
db_clone/users/tests/test_views.py
|
zzdia/db_clone
|
f079d4a8f0cbd478d6db63907b719a2cd0936167
|
[
"MIT"
] | null | null | null |
db_clone/users/tests/test_views.py
|
zzdia/db_clone
|
f079d4a8f0cbd478d6db63907b719a2cd0936167
|
[
"MIT"
] | null | null | null |
db_clone/users/tests/test_views.py
|
zzdia/db_clone
|
f079d4a8f0cbd478d6db63907b719a2cd0936167
|
[
"MIT"
] | null | null | null |
import pytest
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import RequestFactory
from django.urls import reverse
from db_clone.users.forms import UserChangeForm
from db_clone.users.models import User
from db_clone.users.tests.factories import UserFactory
from db_clone.users.views import (
UserRedirectView,
UserUpdateView,
user_detail_view,
)
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
def test_form_valid(self, user: User, rf: RequestFactory):
view = UserUpdateView()
request = rf.get("/fake-url/")
# Add the session/message middleware to the request
SessionMiddleware().process_request(request)
MessageMiddleware().process_request(request)
request.user = user
view.request = request
# Initialize the form
form = UserChangeForm()
form.cleaned_data = []
view.form_valid(form)
messages_sent = [m.message for m in messages.get_messages(request)]
assert messages_sent == ["Information successfully updated"]
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, rf: RequestFactory):
view = UserRedirectView()
request = rf.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
class TestUserDetailView:
def test_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = UserFactory()
response = user_detail_view(request, username=user.username)
assert response.status_code == 200
def test_not_authenticated(self, user: User, rf: RequestFactory):
request = rf.get("/fake-url/")
request.user = AnonymousUser()
response = user_detail_view(request, username=user.username)
login_url = reverse(settings.LOGIN_URL)
assert response.status_code == 302
assert response.url == f"{login_url}?next=/fake-url/"
| 30.979592
| 75
| 0.685771
|
8357272fbb17d714f72378604c91d5d5482e7bc4
| 1,084
|
py
|
Python
|
day006/main.py
|
marcosvbras/60-days-of-python
|
b6966192a8e9113dd13491ab8707c2b547397eea
|
[
"Apache-2.0"
] | 5
|
2018-04-07T02:45:04.000Z
|
2022-03-29T02:52:03.000Z
|
day006/main.py
|
marcosvbras/60-days-of-python
|
b6966192a8e9113dd13491ab8707c2b547397eea
|
[
"Apache-2.0"
] | null | null | null |
day006/main.py
|
marcosvbras/60-days-of-python
|
b6966192a8e9113dd13491ab8707c2b547397eea
|
[
"Apache-2.0"
] | 5
|
2019-05-10T03:12:17.000Z
|
2021-01-16T10:54:41.000Z
|
# coding=utf-8
def run():
    # Assigning tuple values to variables
name, age, height, weight = ('Marcos', 21, 173, 62)
print("Name: {} - Age: {} - Height: {} - Weight: {}".format(
name, age, height, weight)
)
# >>> Name: Marcos - Age: 21 - Height: 173 - Weight: 62
animes = (
('Attack on Titan', 'Seinen'),
('Sakurasou no Pet na Kanojo', 'Slice Of Life'),
('Fullmetal Alchemist', 'Shonen'),
)
for anime in animes:
        # The '%' operator understands the tuple 'anime' as separate fields
print("%s/%s" % anime)
# >>> Attack on Titan/Seinen
# Sakurasou no Pet na Kanojo/Slice Of Life
    # Fullmetal Alchemist/Shonen
cities = ['Porto Alegre', 'Rio de Janeiro', 'Minas Gerais']
# Creating a tuple with Generator Expressions (genexp)
# Generator Expressions are used to create sequences like Tuple
cities_tuple = tuple(city for city in cities)
print(cities_tuple)
# >>> ('Porto Alegre', 'Rio de Janeiro', 'Minas Gerais')
if __name__ == '__main__':
run()
| 32.848485
| 75
| 0.593173
|
9542726405d96e7cbbc1eb1ae74b42e20b651624
| 8,236
|
py
|
Python
|
test/functional/test_framework/key.py
|
Jiankun-Huang/Testcoin
|
4f6454f6f8c8e020a3d206ad370ad725f92caac4
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/key.py
|
Jiankun-Huang/Testcoin
|
4f6454f6f8c8e020a3d206ad370ad725f92caac4
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/key.py
|
Jiankun-Huang/Testcoin
|
4f6454f6f8c8e020a3d206ad370ad725f92caac4
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011 Sam Rushing
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-testcoinsuperlib.
"""
import ctypes
import ctypes.util
import hashlib
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey():
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
        # EC_POINT_mul derives the public key point: pub_key = priv_key * G
        if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
            raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
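# --- Hedged usage sketch (not part of the original module) ---
# A minimal sign/verify round trip with the wrappers above, assuming a compatible
# OpenSSL build is reachable through ctypes. The 32-byte secret and the message
# below are illustrative values only.
def _example_sign_and_verify():
    key = CECKey()
    key.set_secretbytes(b'\x01' * 32)            # derive the keypair from a fixed secret
    key.set_compressed(True)                     # serialize the public key in compressed form
    pub = CPubKey(key.get_pubkey())              # wrap the serialized public key
    msg_hash = hashlib.sha256(b'example message').digest()
    sig = key.sign(msg_hash)                     # DER-encoded, low-S normalized signature
    assert pub.verify(msg_hash, sig)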
| 36.281938
| 130
| 0.691476
|
da824313141a69577ef720376ab5786d70634c25
| 1,252
|
py
|
Python
|
Engine/ScanTarget.py
|
abrahamfdzg/TibiaAuto12
|
c78503a6ed8fcad382694d8fd592c6a64c65fd5d
|
[
"MIT"
] | null | null | null |
Engine/ScanTarget.py
|
abrahamfdzg/TibiaAuto12
|
c78503a6ed8fcad382694d8fd592c6a64c65fd5d
|
[
"MIT"
] | null | null | null |
Engine/ScanTarget.py
|
abrahamfdzg/TibiaAuto12
|
c78503a6ed8fcad382694d8fd592c6a64c65fd5d
|
[
"MIT"
] | null | null | null |
def ScanTarget(BattlePosition, monster, HOOK_OPTION):
    """Scan the battle-list screen region for the given monster image.

    Returns adjusted (x, y) click coordinates when the target is found,
    or (0, 0) when it is not.
    """
    has_target = [0, 0]
if HOOK_OPTION == 0:
import pyautogui
has_target = pyautogui.locateOnScreen('images/Targets/' + monster + '.png', confidence=0.9, region=(
BattlePosition[0], BattlePosition[1], BattlePosition[2], BattlePosition[3]))
if has_target:
Target = pyautogui.center(has_target)
if Target[0] < BattlePosition[0]:
return Target[0] - 30, Target[1]
else:
return Target[0] - 40, Target[1]
else:
return 0, 0
elif HOOK_OPTION == 1:
from Engine.HookWindow import LocateCenterImage
has_target[0], has_target[1] = LocateCenterImage('images/Targets/' + monster + '.png', Precision=0.9, Region=(
BattlePosition[0], BattlePosition[1], BattlePosition[2], BattlePosition[3]))
if has_target[0] != 0 and has_target[1] != 0:
if has_target[0] < BattlePosition[0]:
return (BattlePosition[0] - 30) + has_target[0], has_target[1] + BattlePosition[1]
else:
return (BattlePosition[0] - 40) + has_target[0], has_target[1] + BattlePosition[1]
else:
return 0, 0
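# --- Hedged usage sketch (not part of the original module) ---
# Illustrative call only; the battle-list region and the monster image name are
# made-up values ('images/Targets/Rotworm.png' would have to exist on disk).
if __name__ == '__main__':
    battle_position = (1600, 200, 170, 600)  # x, y, width, height of the battle list
    x, y = ScanTarget(battle_position, 'Rotworm', HOOK_OPTION=0)
    if (x, y) != (0, 0):
        print('Target found at', x, y)
    else:
        print('No target on screen')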
| 41.733333
| 118
| 0.589457
|
4682f1ab84d719cafd1d94669a9ee3ca5f1797fc
| 2,663
|
py
|
Python
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesClearTest(test.TestCase):
# Verifies behavior of tf.Session.reset().
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testSameVariablesClear(self):
server = server_lib.Server.create_local_server()
# Creates a graph with 2 variables.
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
# Verifies that both sessions connecting to the same target return
# the same results.
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
sess_1.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
# Resets target. sessions abort. Use sess_2 to verify.
session.Session.reset(server.target)
with self.assertRaises(errors_impl.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
# Connects to the same target. Device memory for the variables would have
# been released, so they will be uninitialized.
sess_2 = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess_2.run(v2)
# Reinitializes the variables.
sess_2.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
if __name__ == "__main__":
test.main()
| 38.594203
| 80
| 0.725498
|
30bcb874d1adb78a2282b2af3d423cb57dd26fd1
| 10,284
|
py
|
Python
|
training_v1_backup/training/DQN/run_dqn.py
|
prasoonpatidar/multiagentRL-resource-sharing
|
e63ba7fc3c7ab019e9fd109cd45b739e3322152f
|
[
"MIT"
] | null | null | null |
training_v1_backup/training/DQN/run_dqn.py
|
prasoonpatidar/multiagentRL-resource-sharing
|
e63ba7fc3c7ab019e9fd109cd45b739e3322152f
|
[
"MIT"
] | null | null | null |
training_v1_backup/training/DQN/run_dqn.py
|
prasoonpatidar/multiagentRL-resource-sharing
|
e63ba7fc3c7ab019e9fd109cd45b739e3322152f
|
[
"MIT"
] | null | null | null |
'''
Wrapper function to learn and evaluate dqn policies
'''
import numpy as np
import time
# custom libraries
from training.DQN.run_helper import buyerPenaltiesCalculator, buyerUtilitiesCalculator, evaluation, action2y
from training.DQN.run_helper import logger_handle, initialize_agent, get_ys, choose_prob, cumlativeBuyerExp, getPurchases
def learn_policy(run_config, seller_info, buyer_info, train_config, logger_pass):
# Initialize the logger
logger = logger_handle(logger_pass)
    # get required parameters (auxiliary price bounds) for the pricing environment
aux_price_min = 1 / seller_info.max_price
aux_price_max = 1 / seller_info.min_price
logger.info("Fetched raw market information..")
# initialize seller agents
sellers, logger = initialize_agent(seller_info, buyer_info, train_config, logger)
    # Containers to record history (appending to a Python list is amortized O(1))
price_history = []
purchase_history = []
provided_resource_history = []
seller_utility_history = []
seller_penalty_history = []
buyer_utility_history = []
buyer_penalty_history = []
# Start Loop for training
logger.info("Starting training iterations...")
start_time = time.time()
env_state = np.random.randint(0, train_config.action_count, seller_info.count)
next_state = np.random.randint(0, train_config.action_count, seller_info.count)
max_social_welfare = -np.inf
for train_iter in range(0, train_config.iterations):
if train_iter % 10 == 0:
logger.info("Finished %d training iterations in %.3f secs..." % (train_iter, time.time() - start_time))
# get the prices for all seller agents
actions = []
for tmpSeller in sellers:
actions.append(tmpSeller.greedy_actor(env_state))
actions = np.array(actions)
ys = action2y(actions, train_config.action_count, aux_price_min, aux_price_max)
probAll, yAll = choose_prob(ys, compare=False, yAll=None)
# Take step in environment: update env state by getting demands from consumers.
# Save prices in history
prices = 1 / ys
price_history.append(prices)
cumulativeBuyerExperience = cumlativeBuyerExp(buyer_info, sellers)
X = getPurchases(buyer_info, cumulativeBuyerExperience, ys, probAll)
# Save purchased history
purchases = X.sum(axis=0)
purchase_history.append(purchases)
# Get Buyer utilities and penalties in history
buyerUtilities = buyerUtilitiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, probAll,
buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_utility_history.append(buyerUtilities)
buyerPenalties = buyerPenaltiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_penalty_history.append(buyerPenalties)
# get next state based on actions taken in this round
next_state = actions # actions taken in this round is next state
# Based on demands, calculate reward for all agents, and add observation to agents
seller_utilities = []
seller_penalties = []
seller_provided_resources = []
for j in range(0, seller_info.count):
x_j = X[j]
tmpSellerUtility, tmpSellerPenalty, z_j = sellers[j].reward(x_j,yAll)
reward = tmpSellerUtility+tmpSellerPenalty
# Update seller values
sellers[j].add_purchase_history(x_j, z_j)
seller_utilities.append(tmpSellerUtility)
seller_penalties.append(tmpSellerPenalty)
seller_provided_resources.append(z_j)
# train agent
sellers[j].observe((env_state, actions, reward, next_state, False))
if train_iter >= train_config.first_step_memory:
sellers[j].decay_epsilon()
if train_iter % train_config.replay_steps == 0:
sellers[j].replay()
sellers[j].update_target_model()
# set current state to next state
        env_state = next_state
# Get seller utilties and penalties in history
seller_utilities = np.array(seller_utilities)
seller_penalties = np.array(seller_penalties)
seller_utility_history.append(seller_utilities)
seller_penalty_history.append(seller_penalties)
# update provided resources history
seller_provided_resources = np.array(seller_provided_resources)
provided_resource_history.append(seller_provided_resources)
# Update DQN weights if social welfare is better than max social welfare
        social_welfare = np.sum(buyerUtilities) + np.sum(seller_utilities) + \
                         np.sum(buyerPenalties) + np.sum(seller_penalties)
if social_welfare > max_social_welfare:
for j in range(0, seller_info.count):
sellers[j].brain.save_model()
max_social_welfare = social_welfare
results_dict = {
'policy_store': train_config.agents_store_dir,
'buyer_info': buyer_info,
'seller_info': seller_info,
'price_history': price_history,
'seller_utilties': seller_utility_history,
'seller_penalties': seller_penalty_history,
'buyer_utilties': buyer_utility_history,
'buyer_penalties': buyer_penalty_history,
'demand_history': purchase_history,
'supply_history': provided_resource_history
}
return results_dict
def eval_policy(seller_info, buyer_info, train_config, results_dir, logger_pass):
# Initialize the logger
logger = logger_handle(logger_pass)
    # get required parameters (auxiliary price bounds) for the pricing environment
aux_price_min = 1 / seller_info.max_price
aux_price_max = 1 / seller_info.min_price
logger.info("Fetched raw market information..")
# set mode to testing
train_config.test = True
# initialize seller agents
sellers, logger = initialize_agent(seller_info, buyer_info, train_config, logger, is_trainer=False)
    # Containers to record history (appending to a Python list is amortized O(1))
price_history = []
purchase_history = []
provided_resource_history = []
seller_utility_history = []
seller_penalty_history = []
buyer_utility_history = []
buyer_penalty_history = []
    # Start loop for evaluation
    logger.info("Starting evaluation iterations...")
start_time = time.time()
env_state = np.random.randint(0, train_config.action_count, seller_info.count)
next_state = np.random.randint(0, train_config.action_count, seller_info.count)
max_social_welfare = -np.inf
for eval_iter in range(0, train_config.iterations):
if eval_iter % 10 == 0:
logger.info("Finished %d evaluation iterations in %.3f secs..." % (eval_iter, time.time() - start_time))
# get the prices for all seller agents
actions = []
for tmpSeller in sellers:
actions.append(tmpSeller.greedy_actor(env_state))
actions = np.array(actions)
ys = action2y(actions, train_config.action_count, aux_price_min, aux_price_max)
probAll, yAll = choose_prob(ys, compare=False, yAll=None)
# Save prices in history
prices = 1 / ys
price_history.append(prices)
cumulativeBuyerExperience = cumlativeBuyerExp(buyer_info, sellers)
X = getPurchases(buyer_info, cumulativeBuyerExperience, ys, probAll)
# Save purchased history
purchases = X.sum(axis=0)
purchase_history.append(purchases)
# Get Buyer utilities and penalties in history
buyerUtilities = buyerUtilitiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, probAll,
buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_utility_history.append(buyerUtilities)
buyerPenalties = buyerPenaltiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_penalty_history.append(buyerPenalties)
# get next state based on actions taken in this round
next_state = actions # actions taken in this round is next state
# Based on demands, calculate reward for all agents, and add observation to agents
seller_utilities = []
seller_penalties = []
seller_provided_resources = []
for j in range(0, seller_info.count):
x_j = X[j]
tmpSellerUtility, tmpSellerPenalty, z_j = sellers[j].reward(x_j,yAll)
reward = tmpSellerUtility+tmpSellerPenalty
# Update seller values
sellers[j].add_purchase_history(x_j, z_j)
seller_utilities.append(tmpSellerUtility)
seller_penalties.append(tmpSellerPenalty)
seller_provided_resources.append(z_j)
# set current state to next state
        env_state = next_state
# Get seller utilties and penalties in history
seller_utilities = np.array(seller_utilities)
seller_penalties = np.array(seller_penalties)
seller_utility_history.append(seller_utilities)
seller_penalty_history.append(seller_penalties)
# update provided resources history
seller_provided_resources = np.array(seller_provided_resources)
provided_resource_history.append(seller_provided_resources)
eval_dict = {
'policy_store': train_config.agents_store_dir,
'buyer_info': buyer_info,
'seller_info': seller_info,
'price_history': price_history,
'seller_utilties': seller_utility_history,
'seller_penalties': seller_penalty_history,
'buyer_utilties': buyer_utility_history,
'buyer_penalties': buyer_penalty_history,
'demand_history': purchase_history,
'supply_history': provided_resource_history
}
return eval_dict
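# --- Hedged post-processing sketch (not part of the original module) ---
# The histories returned by learn_policy()/eval_policy() are per-iteration lists of
# per-agent arrays; stacking them gives (iterations, agents) matrices. The helper
# name and the aggregation below are illustrative only (dict keys are spelled exactly
# as in the result dictionaries above).
def summarize_results(results_dict):
    prices = np.stack(results_dict['price_history'])    # shape: (iterations, sellers)
    demand = np.stack(results_dict['demand_history'])
    # per-iteration social welfare, mirroring how learn_policy() scores checkpoints
    welfare = (
        np.stack(results_dict['seller_utilties']).sum(axis=1)
        + np.stack(results_dict['seller_penalties']).sum(axis=1)
        + np.stack(results_dict['buyer_utilties']).sum(axis=1)
        + np.stack(results_dict['buyer_penalties']).sum(axis=1)
    )
    return prices.mean(axis=0), demand.mean(axis=0), welfare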
| 40.171875
| 121
| 0.677071
|
96c3dca28485c51f2f5547dae5ac9aa97d6d99f7
| 317
|
py
|
Python
|
live_notes/run_notebook.py
|
timdavidlee/fastai_dl_p2_2019
|
760a28d4dc320848ba28dfd7146ff6bab51499e9
|
[
"MIT"
] | null | null | null |
live_notes/run_notebook.py
|
timdavidlee/fastai_dl_p2_2019
|
760a28d4dc320848ba28dfd7146ff6bab51499e9
|
[
"MIT"
] | null | null | null |
live_notes/run_notebook.py
|
timdavidlee/fastai_dl_p2_2019
|
760a28d4dc320848ba28dfd7146ff6bab51499e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import nbformat,fire
from nbconvert.preprocessors import ExecutePreprocessor
def run_notebook(path):
nb = nbformat.read(open(path), as_version=nbformat.NO_CONVERT)
ExecutePreprocessor(timeout=600).preprocess(nb, {})
print('done')
if __name__ == '__main__': fire.Fire(run_notebook)
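# Hedged usage note (not part of the original script): python-fire exposes
# run_notebook on the command line, so a notebook is executed with, e.g.
# (hypothetical path):
#   python run_notebook.py lesson1.ipynb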
| 28.818182
| 66
| 0.760252
|
a0aa5f699454db21b3adb979284b1b106cf2bc73
| 4,035
|
py
|
Python
|
alipay/aop/api/request/MybankCreditSceneprodLoanModifyRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/MybankCreditSceneprodLoanModifyRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/MybankCreditSceneprodLoanModifyRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MybankCreditSceneprodLoanModifyModel import MybankCreditSceneprodLoanModifyModel
class MybankCreditSceneprodLoanModifyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, MybankCreditSceneprodLoanModifyModel):
self._biz_content = value
else:
self._biz_content = MybankCreditSceneprodLoanModifyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'mybank.credit.sceneprod.loan.modify'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
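# --- Hedged usage sketch (not part of the generated SDK file) ---
# Shows how the request object assembles its parameter dict. The URL and the extra
# text parameter are placeholders; biz_content is omitted here because the
# MybankCreditSceneprodLoanModifyModel field names are not shown in this file.
def _example_build_params():
    request = MybankCreditSceneprodLoanModifyRequest()
    request.notify_url = 'https://example.com/notify'      # hypothetical callback URL
    request.add_other_text_param('trace_id', 'demo-123')   # stored in udf_params
    params = request.get_params()
    # params now holds the method name, version, notify_url and the extra text param
    return params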
| 27.827586
| 166
| 0.648327
|
5fd2fe69630b7eb58731a706e8d30e219775a54a
| 3,645
|
py
|
Python
|
setup.py
|
RestaurantController/py_vmdetect
|
4000524e77f295f29e18224e1518eab0f16ba06a
|
[
"MIT"
] | 1
|
2021-11-17T03:17:30.000Z
|
2021-11-17T03:17:30.000Z
|
setup.py
|
RestaurantController/py_vmdetect
|
4000524e77f295f29e18224e1518eab0f16ba06a
|
[
"MIT"
] | null | null | null |
setup.py
|
RestaurantController/py_vmdetect
|
4000524e77f295f29e18224e1518eab0f16ba06a
|
[
"MIT"
] | 1
|
2021-04-20T20:55:26.000Z
|
2021-04-20T20:55:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import sys
import os
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
sources = ['py_vmdetect/src/vmdetect.cpp']
libraries = []
include_dirs = [] # may be changed by pkg-config
define_macros = []
library_dirs = []
extra_compile_args = ['-fPIC',]
extra_link_args = ['-shared']
requirements = ['Click>=6.0',
'cffi>=1.12.3'
]
setup_requirements = []
test_requirements = []
no_compiler_found = False
if 'freebsd' in sys.platform:
include_dirs.append('/usr/local/include')
library_dirs.append('/usr/local/lib')
if __name__ == '__main__':
from setuptools import setup, Distribution, Extension, find_packages
    # test_compiler()
#cpython = ('_vmdetect_backend' not in sys.builtin_module_names)
class VMDetectDistribution(Distribution):
def has_ext_modules(self):
            # Even if we don't have extension modules (e.g. on PyPy) we want to
# claim that we do so that wheels get properly tagged as Python
# specific. (thanks dstufft!)
return True
class VMDetectExtension(Extension):
def __init__(self, name, sources, *args, **kw):
if 'darwin' in sys.platform:
os.environ["CC"] = 'clang'
os.environ["CXX"] = 'clang++'
os.environ["CFLAGS"] = "-stdlib=libc++ -mmacosx-version-min=10.12 -fno-strict-aliasing -Wsign-compare -fno-common -dynamic " \
"-DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -arch x86_64 -g -fPIC"
os.environ["LDSHARED"] = "clang++ -stdlib=libc++ -undefined dynamic_lookup " \
"-mmacosx-version-min=10.12 " \
"-arch x86_64 -g -shared"
Extension.__init__(self, name, sources, *args, **kw)
setup(
author="Andres Kepler",
author_email='andres@kepler.ee',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
python_requires='>=3.6',
description="Python virtual machine detection tool detects virtual environment - VMWare, XEN, FreeBSD jail etc",
entry_points={
'console_scripts': [
'py_vmdetect=py_vmdetect.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='py_vmdetect',
name='py_vmdetect',
packages=find_packages(include=['py_vmdetect']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/kepsic/py_vmdetect',
version='0.2.4',
zip_safe=False,
distclass=VMDetectDistribution,
ext_modules=[VMDetectExtension(
name='_vmdetect_backend',
include_dirs=include_dirs,
sources=sources,
libraries=libraries,
define_macros=define_macros,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
language = 'c'
)],
)
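# Hedged usage note (not part of the original file): with a working C/C++ toolchain
# installed, the extension backend is typically built and installed with, e.g.:
#   python setup.py build_ext --inplace
#   pip install .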
| 33.136364
| 142
| 0.587654
|
6f3715540cb9e27e10cfdd4c173cda7221d74f79
| 988
|
py
|
Python
|
apps/game/views.py
|
SharifAIChallenge/saint-peters-gates
|
7b685f3d7a7b789c055a5c401209a3014de89239
|
[
"MIT"
] | null | null | null |
apps/game/views.py
|
SharifAIChallenge/saint-peters-gates
|
7b685f3d7a7b789c055a5c401209a3014de89239
|
[
"MIT"
] | null | null | null |
apps/game/views.py
|
SharifAIChallenge/saint-peters-gates
|
7b685f3d7a7b789c055a5c401209a3014de89239
|
[
"MIT"
] | null | null | null |
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from services.kafka_cli import KafkaClient
from .serializers import GameRegisterSerializer
import uuid
from apps import permissions
class PlayGameAPIView(GenericAPIView):
permission_classes = [permissions.IsBackend]
serializer_class = GameRegisterSerializer
def post(self, request):
priority = request.GET.get('priority', '-1')
if priority.isnumeric():
priority = int(priority)
if priority != 1:
priority = 0
game_id = uuid.uuid4()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
game_information = serializer.data
game_information['game_id'] = str(game_id)
KafkaClient.register_match(priority, game_information)
return Response(data={'game_id': game_id}, status=status.HTTP_200_OK)
| 34.068966
| 77
| 0.719636
|
02ae025bf3b12c70e5ae77f9cb1d66ebf79087bc
| 6,007
|
py
|
Python
|
app/authentication/auth.py
|
LouisStAmour/notifications-api
|
16734595e70113d85fb10689017b2c30bab61fb3
|
[
"MIT"
] | null | null | null |
app/authentication/auth.py
|
LouisStAmour/notifications-api
|
16734595e70113d85fb10689017b2c30bab61fb3
|
[
"MIT"
] | 1
|
2021-04-30T21:09:42.000Z
|
2021-04-30T21:09:42.000Z
|
app/authentication/auth.py
|
LouisStAmour/notifications-api
|
16734595e70113d85fb10689017b2c30bab61fb3
|
[
"MIT"
] | null | null | null |
from flask import request, _request_ctx_stack, current_app, g
from notifications_python_client.authentication import decode_jwt_token, get_token_issuer
from notifications_python_client.errors import (
TokenDecodeError, TokenExpiredError, TokenIssuerError, TokenAlgorithmError, TokenError
)
from notifications_utils import request_helper
from sqlalchemy.exc import DataError
from sqlalchemy.orm.exc import NoResultFound
from app.dao.services_dao import dao_fetch_service_by_id_with_api_keys
GENERAL_TOKEN_ERROR_MESSAGE = 'Invalid token: make sure your API token matches the example at https://docs.notifications.service.gov.uk/rest-api.html#authorisation-header' # noqa
class AuthError(Exception):
def __init__(self, message, code, service_id=None, api_key_id=None):
self.message = {"token": [message]}
self.short_message = message
self.code = code
self.service_id = service_id
self.api_key_id = api_key_id
def __str__(self):
return 'AuthError({message}, {code}, service_id={service_id}, api_key_id={api_key_id})'.format(**self.__dict__)
def to_dict_v2(self):
return {
'status_code': self.code,
"errors": [
{
"error": "AuthError",
"message": self.short_message
}
]
}
def get_auth_token(req):
auth_header = req.headers.get('Authorization', None)
if not auth_header:
raise AuthError('Unauthorized: authentication token must be provided', 401)
auth_scheme = auth_header[:7].title()
if auth_scheme != 'Bearer ':
raise AuthError('Unauthorized: authentication bearer scheme must be used', 401)
return auth_header[7:]
def requires_no_auth():
pass
def requires_admin_auth():
request_helper.check_proxy_header_before_request()
auth_token = get_auth_token(request)
client = __get_token_issuer(auth_token)
if client == current_app.config.get('ADMIN_CLIENT_USER_NAME'):
g.service_id = current_app.config.get('ADMIN_CLIENT_USER_NAME')
for secret in current_app.config.get('API_INTERNAL_SECRETS'):
try:
decode_jwt_token(auth_token, secret)
return
except TokenExpiredError:
raise AuthError("Invalid token: expired, check that your system clock is accurate", 403)
except TokenDecodeError:
# TODO: Change this so it doesn't also catch `TokenIssuerError` or `TokenIssuedAtError` exceptions
# (which are children of `TokenDecodeError`) as these should cause an auth error immediately rather
# than continue on to check the next admin client secret
continue
# Either there are no admin client secrets or their token didn't match one of them so error
raise AuthError("Unauthorized: admin authentication token not found", 401)
else:
raise AuthError('Unauthorized: admin authentication token required', 401)
def requires_auth():
request_helper.check_proxy_header_before_request()
auth_token = get_auth_token(request)
issuer = __get_token_issuer(auth_token) # ie the `iss` claim which should be a service ID
try:
service = dao_fetch_service_by_id_with_api_keys(issuer)
except DataError:
raise AuthError("Invalid token: service id is not the right data type", 403)
except NoResultFound:
raise AuthError("Invalid token: service not found", 403)
if not service.api_keys:
raise AuthError("Invalid token: service has no API keys", 403, service_id=service.id)
if not service.active:
raise AuthError("Invalid token: service is archived", 403, service_id=service.id)
for api_key in service.api_keys:
try:
decode_jwt_token(auth_token, api_key.secret)
except TokenExpiredError:
err_msg = "Error: Your system clock must be accurate to within 30 seconds"
raise AuthError(err_msg, 403, service_id=service.id, api_key_id=api_key.id)
except TokenAlgorithmError:
err_msg = "Invalid token: algorithm used is not HS256"
raise AuthError(err_msg, 403, service_id=service.id, api_key_id=api_key.id)
except TokenDecodeError:
# we attempted to validate the token but it failed meaning it was not signed using this api key.
# Let's try the next one
# TODO: Change this so it doesn't also catch `TokenIssuerError` or `TokenIssuedAtError` exceptions (which
# are children of `TokenDecodeError`) as these should cause an auth error immediately rather than
# continue on to check the next API key
continue
except TokenError:
# General error when trying to decode and validate the token
raise AuthError(GENERAL_TOKEN_ERROR_MESSAGE, 403, service_id=service.id, api_key_id=api_key.id)
if api_key.expiry_date:
raise AuthError("Invalid token: API key revoked", 403, service_id=service.id, api_key_id=api_key.id)
g.service_id = api_key.service_id
_request_ctx_stack.top.authenticated_service = service
_request_ctx_stack.top.api_user = api_key
current_app.logger.info('API authorised for service {} with api key {}, using issuer {} for URL: {}'.format(
service.id,
api_key.id,
request.headers.get('User-Agent'),
request.base_url
))
return
else:
# service has API keys, but none matching the one the user provided
raise AuthError("Invalid token: API key not found", 403, service_id=service.id)
def __get_token_issuer(auth_token):
try:
issuer = get_token_issuer(auth_token)
except TokenIssuerError:
raise AuthError("Invalid token: iss field not provided", 403)
except TokenDecodeError:
raise AuthError(GENERAL_TOKEN_ERROR_MESSAGE, 403)
return issuer
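# --- Hedged usage sketch (not part of the original module) ---
# Shape of a client token that requires_auth() accepts: an HS256 JWT whose `iss`
# claim is the service id and whose `iat` is within ~30 seconds of server time,
# signed with one of that service's API key secrets. PyJWT is used here purely
# for illustration; the service id and secret below are placeholders.
def _example_client_auth_header(service_id="11111111-1111-1111-1111-111111111111",
                                api_key_secret="placeholder-secret"):
    import time
    import jwt  # PyJWT, assumed available for this sketch only
    token = jwt.encode(
        {"iss": service_id, "iat": int(time.time())},
        api_key_secret,
        algorithm="HS256",
    )
    if isinstance(token, bytes):  # PyJWT 1.x returns bytes, 2.x returns str
        token = token.decode()
    return {"Authorization": "Bearer " + token}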
| 40.315436
| 179
| 0.684368
|
7aff4811327fff17baaf32dcc724cac78f4f3b4d
| 1,748
|
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/tests_manual/test_no_additional_properties.py
|
JigarJoshi/openapi-generator
|
785535b8d6881b358463994823abbda2b26ff42e
|
[
"Apache-2.0"
] | 1
|
2022-01-24T08:22:21.000Z
|
2022-01-24T08:22:21.000Z
|
samples/openapi3/client/petstore/python-experimental/tests_manual/test_no_additional_properties.py
|
JigarJoshi/openapi-generator
|
785535b8d6881b358463994823abbda2b26ff42e
|
[
"Apache-2.0"
] | 9
|
2021-11-01T08:59:31.000Z
|
2022-03-31T08:31:57.000Z
|
samples/openapi3/client/petstore/python-experimental/tests_manual/test_no_additional_properties.py
|
JigarJoshi/openapi-generator
|
785535b8d6881b358463994823abbda2b26ff42e
|
[
"Apache-2.0"
] | 1
|
2022-02-06T21:14:46.000Z
|
2022-02-06T21:14:46.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import petstore_api
from petstore_api.model.no_additional_properties import NoAdditionalProperties
class TestNoAdditionalProperties(unittest.TestCase):
"""NoAdditionalProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNoAdditionalProperties(self):
"""Test NoAdditionalProperties"""
# works with only required
inst = NoAdditionalProperties(id=1)
# works with required + optional
inst = NoAdditionalProperties(id=1, petId=2)
# needs required
# TODO cast this to ApiTypeError?
with self.assertRaisesRegex(
TypeError,
r"missing 1 required keyword-only argument: 'id'"
):
NoAdditionalProperties(petId=2)
# may not be passed additional properties
# TODO cast this to ApiTypeError?
with self.assertRaisesRegex(
TypeError,
r"got an unexpected keyword argument 'invalidArg'"
):
NoAdditionalProperties(id=2, invalidArg=2)
# plural example
# TODO cast this to ApiTypeError?
with self.assertRaisesRegex(
TypeError,
r"got an unexpected keyword argument 'firstInvalidArg'"
):
NoAdditionalProperties(id=2, firstInvalidArg=1, secondInvalidArg=1)
if __name__ == '__main__':
unittest.main()
| 26.892308
| 174
| 0.653318
|
9e40e062b3334c67fca4f08c91f9854fe18d786e
| 138
|
py
|
Python
|
es005.py
|
GuilhermeDallari/desafiomuundo1
|
df4e06393970d2b686622d13d0ef433b761164fd
|
[
"MIT"
] | null | null | null |
es005.py
|
GuilhermeDallari/desafiomuundo1
|
df4e06393970d2b686622d13d0ef433b761164fd
|
[
"MIT"
] | null | null | null |
es005.py
|
GuilhermeDallari/desafiomuundo1
|
df4e06393970d2b686622d13d0ef433b761164fd
|
[
"MIT"
] | null | null | null |
n1 = int(input('enter a number: '))
a = n1 - 1
b = n1 + 1
print('the predecessor of number {} is {}, and its successor is {}'.format(n1, a, b))
| 27.6
| 77
| 0.57971
|
ba2d7ee907f2555410e3495a61137c9986b99464
| 350
|
py
|
Python
|
api/urls.py
|
ikbrunel/higharc
|
3f56eb007a4d5e52dc3e06df1b2b7a9c30ec54e8
|
[
"CNRI-Python"
] | null | null | null |
api/urls.py
|
ikbrunel/higharc
|
3f56eb007a4d5e52dc3e06df1b2b7a9c30ec54e8
|
[
"CNRI-Python"
] | null | null | null |
api/urls.py
|
ikbrunel/higharc
|
3f56eb007a4d5e52dc3e06df1b2b7a9c30ec54e8
|
[
"CNRI-Python"
] | null | null | null |
from api.views import SmoothieViewSet, SmoothieIngredientViewSet
from rest_framework.routers import DefaultRouter
api_router = DefaultRouter()
api_router.register(
r'smoothie', SmoothieViewSet, basename='smoothie')
api_router.register(
r'ingredient', SmoothieIngredientViewSet, basename='smoothie-ingredient')
urlpatterns = api_router.urls
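# Hedged note (not part of the original module): DRF's DefaultRouter turns the two
# registrations above into list/detail routes, roughly:
#   /smoothie/ and /smoothie/{pk}/      (url names smoothie-list / smoothie-detail)
#   /ingredient/ and /ingredient/{pk}/  (url names smoothie-ingredient-list / -detail)
# plus the browsable API root at the router's base URL.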
| 31.818182
| 77
| 0.822857
|
c859f44d148f9e1d6fffbb9f6d225cadd222b6ea
| 62,692
|
py
|
Python
|
Lib/site-packages/PySide/examples/graphicsview/dragdroprobot/dragdroprobot_rc.py
|
heylenz/python27
|
bee49fa9d65b8ab7d591146a5b6cd47aeb41d940
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
Lib/site-packages/PySide/examples/graphicsview/dragdroprobot/dragdroprobot_rc.py
|
heylenz/python27
|
bee49fa9d65b8ab7d591146a5b6cd47aeb41d940
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
Lib/site-packages/PySide/examples/graphicsview/dragdroprobot/dragdroprobot_rc.py
|
heylenz/python27
|
bee49fa9d65b8ab7d591146a5b6cd47aeb41d940
|
[
"bzip2-1.0.6",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-

# Resource object code
#
# Created: Fri Jul 30 18:00:51 2010
#      by: The Resource Compiler for PySide (Qt v4.6.2)
#
# WARNING! All changes made in this file will be lost!

from PySide import QtCore

qt_resource_data = "\
\x00\x00\x3a\x7c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x84\x00\x00\x00\xb1\x08\x04\x00\x00\x00\xaf\xfa\xdd\x32\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd6\x03\
\x10\x0a\x31\x18\xc7\xac\x62\xef\x00\x00\x00\x1d\x74\x45\x58\x74\
\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x43\x72\x65\x61\x74\x65\x64\x20\
\x77\x69\x74\x68\x20\x54\x68\x65\x20\x47\x49\x4d\x50\xef\x64\x25\
\x6e\x00\x00\x00\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\
\x00\x39\xe4\x49\x44\x41\x54\x78\xda\xd5\xbd\xd9\xb2\x5d\xc7\xb5\
\x9e\xf9\x65\xe6\x6c\x57\xbb\xf7\x06\x36\x00\xf6\x24\xa8\xc6\x75\
\x8e\x1d\x61\x59\xf2\x45\xd5\x45\x5d\x54\xd1\xe1\x8b\xba\x26\x1f\
\x81\x7a\x04\xea\x11\xc8\x47\x10\x1f\x41\x7a\x04\x31\xea\xae\xae\
\x7c\x8e\xe3\xd4\x29\x47\x58\x16\x4d\x1c\x51\x02\x89\x6e\x77\xab\
\x9d\x7d\xa6\x2f\x72\xcc\x5c\x73\x2d\x6c\x10\x0d\x49\x85\x0b\x0c\
\x92\x00\x76\xb7\xd6\x98\x99\xa3\xf9\xc7\x3f\xfe\xa1\x56\xfc\xb8\
\xbf\xdc\xe0\xf7\x2a\xfc\x9d\x02\x2c\x4a\x7e\x3f\xfc\x53\x4b\x07\
\xc4\x18\x2a\xda\x8f\xdb\xdf\x72\x19\xfd\x26\xfe\x3c\x46\x63\x9f\
\xf1\x13\x2c\xa0\xe4\xbb\x70\xcd\x4f\x7b\xb1\x5f\xea\x6f\x67\x08\
\xb5\xf7\x37\x8a\x0e\x85\x1a\x18\xa2\xa2\xfd\xb4\xf9\xa4\x43\x61\
\x30\xbf\x57\xf7\x16\x9f\x14\x40\x4e\x0c\x8c\x3e\x9b\xfc\xa6\xa1\
\xc3\xde\x8d\xee\xc5\x68\xc0\xe1\x00\x15\xbe\x9b\xda\x33\xf9\xff\
\xd4\x86\xf0\x2f\xdb\xc9\x5b\x77\x68\x1c\x96\x4e\x8c\xd2\x7c\x7a\
\xf1\x49\x41\x4d\xca\x98\x88\x92\x35\x5b\x1c\x09\x19\x0a\x4b\x42\
\x4e\x8d\x05\x52\x26\xe4\x2a\x12\xf3\x29\xf9\x09\xea\xe0\x27\xbd\
\xbc\x21\x22\xfe\x66\xbf\xfa\x0b\x01\x1a\xb0\x68\x3a\xca\x8f\xb7\
\xbf\xad\xb1\x54\x94\x6c\x51\xa4\x28\x1a\x36\x5c\xb2\x24\x26\xa7\
\xe2\x8a\x0a\x03\xb4\x14\x68\x62\x72\xe6\x1c\xbb\x39\xb9\xca\xe4\
\xad\x2a\x39\x1d\xdf\xef\xd7\xdf\xd0\x10\x6a\x70\x84\x2d\x0d\xd5\
\xdd\xf2\xab\x4b\x2e\xa8\xe8\x68\xd8\x72\xcc\x31\x73\x1c\x0b\xce\
\xd8\x62\xd1\x94\x14\x3c\x62\x43\x06\x14\x18\x12\x32\xb6\x6c\x29\
\xe9\x98\x3b\xad\x22\x8c\x7c\xd7\xff\x5f\x19\xa2\x3f\x17\x16\x47\
\xc3\xd6\x2d\x58\x73\xc6\x25\x30\x22\x22\x26\x05\x0a\x0a\x9e\x70\
\x41\x45\xc4\x15\x0d\x25\x05\x1d\x25\x06\x47\x8c\xc2\x51\x51\x51\
\x53\xb1\xe0\xc4\xa5\x8c\x2f\x47\x27\x11\xfa\xda\xab\xc1\xff\xac\
\x3e\xc2\x7b\x85\x9a\x0e\x28\xdd\x8a\xfb\x6c\x38\xa3\x60\xc2\x29\
\x33\x34\x11\x5b\x56\xac\x28\xa9\x29\x69\x38\x63\x83\xe1\x98\x31\
\x2d\x96\x0c\x87\x01\x6a\x1a\x14\x19\x29\x77\x98\x73\xca\x09\xe3\
\xf7\xd3\x7b\xe6\x7b\x3b\xcb\xe8\xd5\xdf\x9c\x7a\xea\x8d\xf6\xb7\
\xbf\x43\xc9\x53\xe2\xc0\x55\xb6\x34\x1f\x17\xbf\xdd\xb2\xa1\xe4\
\x3e\x1d\x96\x11\x63\x12\x34\x86\x2b\x36\x5c\xb1\x66\xcb\x96\x25\
\x25\x57\x4c\x99\xd3\xf0\x98\x16\x4d\x4c\xd3\xc7\x16\x22\x66\x4c\
\x80\x05\x6b\xd6\xcc\xbf\x9a\x31\x57\x99\x44\x21\xff\x1a\xa0\xc5\
\x1c\x38\xd3\x1f\xe1\x44\xb8\x67\x58\xdc\x49\x36\xe0\x7f\xf5\x41\
\x4e\x0d\x4e\x44\xcd\x85\x5b\x52\x72\xce\x43\x2e\xb0\x8c\xb8\xc1\
\x18\x47\x83\x43\xb3\xe1\x82\x15\x1b\x4a\xb6\x54\x28\x12\x62\x34\
\x1d\x2d\x00\xe7\x00\xc4\xa4\xa4\xc4\x18\x6e\x62\xd1\x44\x64\x9c\
\xf0\x26\xb7\x78\x53\xc5\x98\xf0\xca\xdc\xde\xef\x86\xa1\x5b\xfd\
\x18\x3e\x62\xdf\x24\x3e\x18\x6a\x1c\x0d\xb5\xb3\xe8\xcb\xf4\xa4\
\xf7\xed\x0d\x06\x47\xf5\xe9\x39\xe7\x94\x3c\xe0\x4b\x22\x72\xc6\
\x28\xd6\x2c\x68\x48\x49\xb9\xe2\x21\x17\x94\x34\x58\x2c\x73\x3a\
\x3a\xac\x18\xb8\xa3\xa6\x45\xa1\x18\x13\x63\xd9\xb2\x92\x8f\x6b\
\xe6\x9c\xf1\x0e\xb8\x19\xe3\x5f\xeb\xcf\x15\x86\x18\xb5\xf7\x50\
\x86\x67\xf7\x7a\x63\x7c\xcf\x13\x31\xbc\x97\xed\x20\x2e\x94\x77\
\xb7\x5f\xad\x51\xc4\x8c\x2f\x27\x27\x11\x8e\x0e\x43\xc9\x63\xf7\
\x35\x8f\xd9\xf0\x98\x87\x24\xdc\xe6\x84\x96\x4b\x1a\x26\xcc\xf8\
\x86\x25\x17\xac\xb0\x44\x24\xc4\xa4\x28\x8c\x98\x01\xa0\xa2\xa2\
\x21\x66\x4c\x4c\x4b\xcd\x82\x88\x84\x08\x48\xb9\xcd\x1d\xde\xe1\
\x84\x13\x72\x12\x72\x95\x60\xb0\x72\x55\x86\x79\x86\x1b\xc4\xaf\
\x1f\x25\x6a\xa8\x70\x19\x2c\x0e\x4d\x7a\x4f\xab\x09\x6b\x77\xc1\
\xf9\xf1\xc8\x1d\x33\x55\x31\x9b\x0f\x1e\xfd\xe1\x6b\xce\xb9\xe2\
\x9c\x0d\x13\x40\xb1\xe0\x31\x35\xc7\x38\x9e\xf0\xdf\x68\xb1\x40\
\x42\x8c\x4f\xa9\x33\x72\xa0\xa2\x41\x11\x61\x69\xa9\xd9\xb0\xc1\
\x88\x07\xf2\x97\xa7\xa5\xe6\x9c\x86\x29\x2d\x1d\xc7\x1c\x01\x8e\
\x6e\xcf\x87\xb9\xe7\x86\xd8\xe8\x87\x88\x07\x4a\x6e\xaf\xa5\xb8\
\xbb\xf8\xaa\xc6\xa0\x68\xb9\xf1\x59\xc5\x9a\x05\x09\x25\x85\xcb\
\x78\xcc\x3d\xee\xd3\xb2\xe1\x09\x96\x39\x8e\x2b\xb6\x6c\x99\x10\
\x71\xce\x5f\x29\x31\x24\x68\x1c\x25\x0b\x5a\x26\x8c\xa8\xb1\x6c\
\x28\x81\x08\x47\x4d\x45\x41\x87\x26\x23\x46\xd1\xe1\x48\x71\x40\
\x89\xe1\x4b\x26\x9c\xf2\x3a\x11\x63\x0c\x4a\x2e\xd5\xbe\xb3\xfe\
\x51\x9c\xa5\x93\x3c\xb1\x2f\x86\x62\x2a\xce\xdd\x37\xac\x70\x54\
\x2c\xb9\x43\xc1\x8a\x12\x48\x48\x31\xac\x79\xc2\x0a\x45\xc1\x05\
\x35\x39\x19\x1d\x90\x12\x51\x70\xc9\x92\x11\x96\x96\x8a\x92\x9a\
\x0e\xcb\x1c\x83\xa6\xa5\xa4\x06\x14\x29\x96\x8e\x0e\x87\x26\xc2\
\xe0\x80\x84\x8c\x94\x88\x8c\x9c\x2d\x19\x47\xbc\xc5\xff\xc2\x4f\
\x39\x51\xd1\xc0\x47\xb8\xa7\x6a\x11\xf5\x43\x9e\x08\x9f\xda\xb6\
\x74\x1f\xb7\x9f\xda\x63\xc7\x44\x55\xff\xe0\x1d\x5d\xc7\x15\xdf\
\x72\xc1\x96\x05\x2b\x6a\x14\x31\x31\x13\x1c\xd0\x51\x61\xa9\xd8\
\x70\x8c\xe2\x84\x23\x9e\xf0\x67\x6a\x8e\x68\xd8\xb2\x64\x43\x43\
\x4c\xce\x88\x86\x8a\x8e\x16\x8b\x95\x24\xcc\x07\x68\x03\x34\x54\
\xf2\xd2\x63\x3a\x1c\x2d\x25\x13\xa0\xe4\x9c\xc7\xdc\x20\xfd\x87\
\xd1\xaf\x8c\x44\x2c\xf7\x7d\xf3\x88\xa7\x13\x94\x0e\x27\x5f\xe0\
\xd0\x40\x4d\x71\xd1\x1c\xb7\x94\x94\x74\x94\x6e\xc9\x23\x1e\x71\
\xc6\x86\x0a\xcb\x29\x96\x92\x92\x05\x1b\x6a\x1c\x37\x98\x32\xa1\
\x65\xc9\x96\x88\x11\x0d\xff\x9a\x9c\xff\x8f\x7f\x21\x62\x42\xc9\
\x5f\x68\xa8\x51\x64\x8c\xa4\xe6\xac\x68\x68\x48\xc8\xb0\x54\x74\
\xe4\x44\x38\x22\x32\xa0\x64\x89\xc2\xe2\xe8\x98\x30\x22\x61\x4c\
\x44\x46\xc4\x92\xc7\x8c\x7e\x19\xdd\xcd\xef\xa9\x67\x84\x4a\xf5\
\xaa\x3e\x42\x85\xcc\xc0\xed\xfd\x4d\x7b\x77\x73\x5c\x52\xb3\x61\
\x41\xc1\x92\x4b\x1e\xb3\xa2\x15\xff\xfc\x98\x96\x82\x35\x05\x9a\
\x31\x63\x46\x68\x36\xac\x59\xd1\x31\x22\xe5\x94\x6f\x59\x72\x81\
\xa6\x66\x49\x41\x81\xc3\x61\x29\xe9\xa8\x49\xd8\x62\xe9\xd0\x18\
\x71\x93\x71\x40\x2c\x2a\x14\x8e\x1b\xf2\xd1\x0c\x43\x4b\x87\x22\
\x27\x27\x21\xf1\xce\xf4\x9e\x1b\xbc\x61\xf5\x7d\x4e\x84\xba\xa6\
\x7a\x54\xc1\x03\xb7\x54\x77\x2f\xbf\x7a\xcc\x9a\x8a\x2b\xce\x58\
\xe0\xb8\x62\x41\x4b\x8c\xa2\xa4\xa0\xc0\xd2\xd0\x00\x29\x33\x66\
\x2c\xe9\xc4\x03\x24\xa4\xe4\x38\xfe\xca\x63\x14\x8a\x15\x6b\xc9\
\x0b\x15\x96\x9a\x96\x96\x94\x0c\x7c\x00\x26\xa1\x40\x01\x11\x29\
\xd0\x62\xe9\xb0\x92\xbe\xa5\x80\xc6\xd2\xb1\x65\x2a\xaf\x3b\x25\
\xa5\x71\x77\x54\x44\xf4\xc2\xb9\x65\xf4\x32\x2e\x52\x85\xff\xd6\
\xac\xdd\x43\x2e\x39\x67\x4b\xc1\x25\x67\x6c\x18\x51\xca\x5d\xae\
\x59\xb1\x21\x45\x33\x22\x21\x26\x42\xb1\x61\x25\x3f\xd0\x47\xff\
\x8e\x6f\x59\xd2\xe1\x28\x59\xe2\xc8\x05\xab\xf0\x7e\x40\x03\x39\
\xa0\x88\xc8\x18\x91\x92\xb0\xc0\x90\x12\x53\xb1\xa1\xc5\x52\xd3\
\xa1\x70\x24\xc4\x40\x4b\xcd\x16\x45\xc1\x15\x57\x5c\x72\x83\xad\
\x9b\x32\x23\x57\x26\xa4\xdd\xdf\xdb\x10\xee\xe0\x4f\x1d\x4b\xf7\
\x90\xaf\xd8\x52\x52\xb3\xe6\x92\x15\x0d\x1b\x6a\x1a\x1a\xc9\x03\
\x0d\xa7\x28\x62\x46\xc4\x94\x5c\xb0\x22\xc6\x04\xef\x5d\xd3\xb0\
\x25\x66\xcc\x86\x46\x70\x06\x45\x43\x85\x21\x26\xc6\xa0\x59\x89\
\xd9\xb7\x8c\x88\xe9\xe8\xd8\xd2\x90\xd1\x52\xe1\xe4\xcd\xbb\x41\
\x4d\x33\x25\x92\xef\xdc\xd0\x70\xc9\x82\x37\x78\x8f\xd3\xdf\xe5\
\x1f\x99\x6b\x11\xac\x97\x34\x84\x1b\xfc\xd7\x07\xca\x8e\xb5\x7b\
\xc8\x7d\x2e\xe4\x47\xae\x59\x51\x60\xb9\xa0\x91\x80\x38\x62\x44\
\xca\x2d\x2a\x1a\xf1\xf0\x15\x25\x11\x84\x97\xad\x31\x01\x6c\xcb\
\xc8\x50\x38\x52\xa0\xc3\x11\xcb\x19\x8a\x50\x72\x99\x5a\xc9\x1a\
\x7c\x44\x89\x80\x8c\x29\x5b\x79\x28\x2d\x56\xaa\x92\xfe\x12\x58\
\x36\xb4\x8c\x69\x31\xc4\x1f\x45\xa8\x83\x2a\xe8\x15\x0c\xe1\x06\
\xa6\xd0\x92\x39\x6c\x2e\x1e\xf1\x0d\x17\x74\x14\x6c\x28\xd9\xb0\
\xa1\xa0\xa5\xc4\x61\x18\x71\xcc\x9c\x1c\x43\x22\x61\xb2\xa4\xa6\
\x25\xa1\x23\x26\x23\x0e\x60\x9d\xa6\xc0\x91\x60\x80\x8e\x4e\xae\
\x81\xcf\x12\x22\x34\xb7\x48\x81\x15\x2b\x5a\x34\x8a\x86\x9a\x82\
\x98\x04\x85\xa6\xa6\xa1\x93\xbc\xc2\x31\x26\x61\x2b\x3e\x25\xc3\
\x51\xb2\xe5\x26\xed\xc0\xaf\x7d\xef\xf0\xe9\xc2\xbf\x4a\xfe\x5f\
\x1c\x9f\x71\x41\x85\xa5\xe0\x52\x60\x12\xff\xec\x0d\x23\x6e\x71\
\x9b\x9c\x92\x15\x1d\x0d\x2d\x25\x6b\x1a\x22\x52\x14\x63\x4e\x48\
\xa8\x59\x53\xd2\x62\x68\x80\x8c\x9a\x12\xc5\x98\x92\x98\x44\x0a\
\x37\x30\x6c\x49\x18\x01\x8e\x06\x85\xa1\xc6\x60\x88\x89\xe8\xa8\
\x58\x33\xa1\xa1\xa2\x65\x43\x47\x4d\x4a\x07\x82\x58\x75\x34\x54\
\x7c\x49\xc4\x29\x53\xa7\xd5\xee\xb4\xbc\x72\xd4\xd8\x41\xe5\xfe\
\xd0\x6d\xef\x5e\x7e\xf5\x84\x82\x48\xea\x8a\x8a\x85\xd4\x80\x2d\
\x0d\xb7\x78\x93\x63\x34\x4b\xce\x58\x70\x8b\x86\x92\x12\xc8\x99\
\x30\x66\xc6\x88\x31\xb0\x41\x13\xd3\xb1\x06\x1a\x1a\x34\x39\xe0\
\x88\x50\x18\x22\xa9\x5e\x5b\x14\x5b\x4a\x5a\x2a\x2a\x09\xdd\xb7\
\xd8\xe0\xc8\x58\xb1\x25\x65\x83\x96\x1c\x53\x71\xc5\x86\x39\x9a\
\x44\xa2\x86\x22\x42\x73\xc1\x3d\x32\x22\x37\x57\xec\x5d\x8d\xeb\
\x60\x84\xe8\xc5\xa2\x85\x37\x48\xc3\xfa\xab\x73\x9e\x70\xc9\x9a\
\x86\x42\x52\xe1\x1a\x87\x21\xe7\x16\xb7\xb8\x85\x62\x2d\x29\x8e\
\xcf\x04\x63\x62\xc6\x9c\x70\xc4\x29\x19\x09\x0d\x4b\xb4\x9c\x06\
\x0d\x54\x38\x49\xd5\x54\x00\x63\x41\xa1\x71\x28\x32\x34\x19\x05\
\x15\x2d\x73\x22\x0c\x15\x11\x31\x31\xd0\x61\xe9\x68\xd1\x82\x50\
\x74\x38\x6a\x89\x38\xb1\xc0\xbd\x4b\x2e\x39\x11\x2c\xfc\x7b\x45\
\x0d\x35\xb8\x1a\x96\xea\xd3\x35\x2b\x96\x5c\xb1\xa6\xa1\xa3\x0e\
\xb9\x7f\x4a\xc2\x0d\xa6\xf2\x2c\xbd\x63\xad\x25\xce\x27\xdc\xe0\
\x35\x8e\x99\x13\x11\x51\x4b\x47\xa3\x22\x95\x3a\xa3\xa3\xa5\x93\
\xd4\xbb\xa3\x13\x5c\xa3\xa3\x20\x65\xca\x98\x94\x44\x12\xa6\x8c\
\x84\x05\x8e\x91\x9c\x4f\x25\x91\x43\x91\x32\x66\x85\xa5\xa5\xc0\
\xd2\x92\x12\x49\x0d\x73\xc6\x9c\xd4\x45\x2a\xf9\xbe\x51\x43\x0d\
\xf0\x06\xdf\x7e\x89\x80\x92\x92\x88\x9a\x1a\x48\xc9\xc8\x89\x49\
\xa9\xa9\xe4\x2b\x36\x5c\x91\x63\x48\x49\x98\x72\x93\x1b\x1c\x61\
\x30\x18\x14\x53\x20\xa3\x61\x43\x4a\x4c\x41\x45\x43\x4d\x87\x91\
\x53\xd6\xd2\x61\xb1\x68\x1a\x5a\xa9\x42\x33\x14\x1b\x34\x19\x15\
\x0d\x19\x8e\x82\x58\xaa\xd5\x92\x4e\xea\x93\x86\x5a\x6a\x9d\x96\
\x18\x83\xe3\x82\x87\x8c\x48\x88\xdc\x4c\x99\x57\x2f\xba\xdc\x41\
\x87\xaa\xa1\xa4\x09\x5d\x84\x86\x82\x0a\x18\x33\x25\xc2\x31\x01\
\x1a\x36\x54\x94\x6c\xa8\xc9\x49\x98\x73\xc4\x11\x27\x1c\x33\xa6\
\x22\x12\x18\x25\xa7\xa6\xe3\x31\x63\x72\xc1\x28\x15\x35\x11\x25\
\x6d\xf8\xfe\x8a\x31\x8a\x08\x47\x85\x0d\x7e\x43\x03\x1d\x86\x54\
\x4a\x39\x0f\xf0\x57\xb4\x94\x8c\x70\xe1\xba\xb4\x18\x36\x54\x28\
\x72\x52\x62\x52\xa2\xdf\x4d\x3f\x7a\xc5\x5a\xc3\xed\xfd\xde\xd1\
\x7c\xbc\xe6\x9c\x4b\x96\x14\xb4\x68\x29\xa5\x52\x26\x4c\x71\xd2\
\x7e\x71\x6c\x79\x4c\x4d\xca\x11\x13\x66\xdc\xe1\x94\x19\x53\x46\
\x24\x18\x01\x54\x1c\x09\xad\x3c\xf1\x92\x15\x0b\x96\xac\xa9\x28\
\x71\x74\x40\x14\x20\x3a\x1f\x23\x1c\x25\x96\x96\x88\x0d\x31\x8e\
\x92\x08\x43\x22\xd7\x28\x17\x8f\xe3\x42\x81\xee\x30\x68\xe9\xa3\
\xb5\x6c\xb8\x60\xc4\x09\xb3\x0f\x27\xdf\x99\x68\x47\xcf\xf3\x0f\
\x48\x29\x64\x59\xfc\xf6\x8c\x33\x96\x14\x6c\xd8\xd2\x51\x63\x18\
\x33\x66\xce\x98\x06\xcb\x43\x60\xcb\x25\x15\x19\x47\x4c\x38\xe5\
\x88\x9b\x1c\xf9\xa3\x89\x26\x0b\xd8\x76\x2c\x00\x7d\x47\x4b\xc1\
\x8a\x25\x6b\x1a\xbe\xa5\xa5\xa1\xa6\xa6\xa0\xa0\xa2\x42\x49\x38\
\xf4\x10\x6d\xcb\x8a\x12\xcd\x86\x98\x29\x09\x0d\x2d\x5b\x52\x0c\
\xbe\x01\xe8\xaf\xad\xc1\x4a\x44\xf3\x0e\x73\xcb\x25\x19\x0b\x4e\
\xbf\x8f\xb3\xec\x9b\x69\xde\x79\x5d\x72\xc9\x06\x2b\xdf\xbe\x22\
\x25\x21\x66\xca\x84\x14\x03\xfc\x99\x0d\x6b\x34\xa7\xbc\xc1\x14\
\xcb\xeb\x62\xa4\x48\x7a\xdc\xb9\x54\x11\x4a\x92\x70\x1f\x8d\x26\
\x4c\x98\x0b\x1c\xe7\x6b\xcb\x82\x2b\x2e\xd9\xd2\xa0\x31\x74\x58\
\x0c\x13\x46\x5c\xd0\xd1\xa1\xa9\x70\x38\x12\x14\x5b\x6a\x3a\x12\
\xc0\xa0\xb1\x68\x29\xbf\xac\xb8\x50\x5f\x9a\xd5\x18\x2e\x58\x7f\
\xbf\xa8\xe1\xb0\x18\x0c\x57\xee\x1b\x16\x58\x12\xd6\x3c\xe2\x9c\
\x0a\x85\x62\xc6\x31\x29\x1b\x1e\x70\xce\x9a\x4b\x20\xe1\x98\xdb\
\xdc\xe6\x98\x84\x23\x66\xcc\x88\x68\xb1\x44\xc4\x12\xe3\xfb\x80\
\xec\xdf\xa2\xc2\x90\x4b\x1a\x95\x53\x51\xd0\x60\xa9\x29\x28\xb9\
\x92\xe4\xbc\x93\x40\x1b\xf1\x3a\x35\x25\x9a\x92\x4b\x72\x14\x11\
\x31\x06\x43\x42\x84\x61\x2d\x10\x4e\x43\x23\xc5\x79\x42\x47\x4d\
\x4e\xc9\x7d\x72\x5e\x77\x73\xa5\xf6\x4a\xc7\x97\x0a\x9f\x1a\x47\
\xc5\x15\x0f\xd8\xd2\x50\xb2\xe6\x9c\x2b\x22\x66\xdc\xa6\xe4\x5b\
\x2a\x4a\x2a\x4a\x1a\x0c\x09\x13\x6e\x72\x9b\x53\x8e\x48\xc9\x48\
\x42\xd3\x57\x4b\x5e\xe0\x06\x08\xb2\x1e\x70\x1a\x14\x9a\x13\x6a\
\x2a\x81\x5a\x4a\xc1\xac\x4a\x0a\x4a\x5a\x3a\x1c\x09\x5b\xb6\x24\
\x18\x22\x20\x26\x11\x88\xd6\x10\x91\x62\xa4\x5b\xde\x49\x6e\xea\
\x41\xc4\x1e\xff\x7e\x42\xca\x3b\x70\x31\x3b\x51\x07\xdd\x96\x17\
\x30\x84\x95\x63\xdc\xb2\x76\x4f\xf8\x36\x40\x64\x35\x96\x9c\x9b\
\x64\xac\xb9\x60\x25\x57\x48\x91\x33\xe5\x36\x6f\xf2\x1a\x37\x98\
\x08\x9a\xe0\x3f\xa2\x31\x21\x37\xdd\xfd\xab\xa5\x14\x52\x68\x2c\
\x0e\x45\x44\x22\x65\x5d\x43\xc3\x11\x96\x9a\x2d\x5b\xd6\x14\x12\
\xa8\x63\x0a\x22\x12\x3a\x14\xa9\x64\x31\x8a\x58\x8a\xfb\xde\x9b\
\xb5\x74\xb4\x6c\x71\x74\x8c\x04\x1b\x1d\x71\xc1\xcd\xe3\xe9\x33\
\x0b\xf2\xe7\xe6\x11\x96\xfa\xe3\xad\x60\xc7\x0d\x5b\x5a\x12\x66\
\x4c\xc9\xb9\x60\x8b\x66\x24\x21\xad\x25\xe7\x26\x6f\x73\x87\x63\
\xc6\xe4\x24\xb4\x62\x79\x1f\x2d\x76\x67\x4c\x1d\x9c\xb8\xde\x60\
\xde\xe7\xf7\x6d\x01\x45\x8e\xa3\x63\xcc\x96\x94\x35\x55\xc0\x20\
\x0c\xb1\x74\x40\x8d\x54\x25\xbe\xfb\x11\x0b\x6e\xdd\x52\xd1\xd2\
\x52\x48\xd7\x5d\x33\xe6\x84\xdb\x9c\x32\xfe\xcc\xbc\x8a\x8f\xd0\
\xf2\x8d\xca\xdf\x96\xc0\x84\x35\x57\x2c\x68\x18\xe1\xd0\x2c\x79\
\x82\x22\x11\x1f\x60\x88\x78\x9b\x9b\xbc\xce\x31\xa9\x27\x7e\xd0\
\x02\x9a\x98\x48\x5e\xa8\x0a\xff\xf4\x26\xde\xb1\x5d\xf4\xa0\xa8\
\x73\x52\x76\x39\x34\x5a\xcc\x18\x53\xb1\xa4\x91\xaa\x24\x93\x0b\
\xa9\x89\x04\xcc\xdd\x99\xcf\x49\xf5\xd1\xe7\x14\x8a\x35\x09\x27\
\xfc\x8c\x37\x3f\x9b\xfe\x86\xd0\xf6\x79\x69\x67\xd9\x7d\x5c\x52\
\x48\x82\xe2\xf1\x80\x11\x0d\x1b\x56\x82\x30\x54\x94\x68\x66\xcc\
\xb8\xcb\x8c\x39\x39\x0a\x47\x4d\x23\x2f\x49\x63\xa4\x80\xd7\x03\
\x3c\xa2\x0f\xcb\xc3\xee\x53\xff\x46\xfa\x7c\xa0\x09\x9e\x45\x11\
\xd3\x08\x16\xb5\xa1\x95\xcc\x76\x41\x24\xa7\xa1\x4f\xcd\x3b\x81\
\xed\x1a\x71\xf2\x2d\x8d\x3c\x14\x4d\x46\xf2\x1b\x7f\x5e\xa2\x6b\
\x4d\xf1\x7c\x60\xe6\x6e\xc3\x92\x73\x16\x9c\xe3\x18\x31\xc2\x52\
\x52\xb0\x05\x6a\x2a\x4a\x2c\xc7\xbc\xc9\xdb\xbc\x49\x42\x86\x91\
\x00\xe6\xdf\x9a\xde\x33\x02\x03\x5a\x87\x3b\x00\x82\xa1\x93\x0b\
\xd4\x17\x5d\xf1\x80\x1e\xa4\xa5\xab\x31\x62\x49\x23\x78\x83\x0e\
\xde\xa5\x95\xb3\xd3\x49\x20\x75\x18\x2c\x8e\x8a\x12\xe5\x5b\x8d\
\x3c\xe0\x3d\x77\xa4\xcc\x20\x6e\xbd\x64\xd4\x50\xf7\x5a\x7c\x46\
\x79\xce\x94\x39\x39\x2b\x36\xac\xa9\x49\xa8\xa9\x68\xc8\xb9\xc5\
\x4f\x79\x8f\x4c\xb2\x84\xfe\xc6\xab\x70\x7b\xad\xfc\xee\x90\x50\
\xb6\x9f\xf2\xba\x80\x2d\x1a\x79\xb2\xfd\xdf\xfa\x0e\xc5\x48\xfa\
\x5d\x05\x56\x80\x7c\x8f\x4f\x35\xf2\x3d\xbc\xe1\x7c\x46\xd1\x5f\
\x4e\x7f\xb1\x1a\x1e\x73\x8f\xbf\x23\xff\x78\xfc\xf9\xb3\xe0\xdc\
\xef\xe8\x74\xf9\x4f\xae\xb9\xef\xfe\x89\x2f\x29\x78\xc4\x09\xc7\
\x38\xce\x78\xc2\x96\x8e\x52\x90\xe5\xd7\xf9\x37\xbc\xcf\x8c\x34\
\xf8\x00\x7f\xe8\xe3\x90\xf3\x31\xb8\x14\x4a\x7c\x82\x12\x1c\xba\
\x6f\xdf\x80\x19\x38\xe8\xbe\xf0\xea\x42\x42\x67\xa5\xe4\x2f\xd8\
\x4a\xc5\xb3\x11\xc4\xbb\xa1\xa4\x12\xa6\x95\x93\xf0\xdb\x08\x7e\
\xf5\x10\xcb\xeb\xb4\x74\xbc\xcb\x7f\xe4\xdf\x73\xaa\x90\xcc\xf3\
\x25\x33\xcb\x21\x60\xab\x68\x58\xd3\xb2\xa5\x45\x61\x98\x60\x88\
\x19\xf3\x0e\x77\x98\x92\x0d\x0e\x72\x4f\xd8\xd0\x72\xf3\x19\xb8\
\xc9\xde\x0c\x43\x0e\xc3\x21\x1d\x6c\x98\x5f\xf4\x57\xcc\xca\x53\
\xb7\x58\x69\x09\xfb\xfb\x5e\x0b\x24\xac\xa9\x71\x12\xf2\x7d\x2d\
\xe2\x48\x29\x39\xa7\x24\xa6\xe5\x09\xf7\xf8\x19\x37\x9f\x09\xef\
\xbf\x40\x5f\xa3\x7f\xca\x31\x1d\x5b\x5a\x29\x71\x3d\xe9\x6f\x3c\
\x88\x14\x51\x78\x3b\x7a\xf0\x76\x77\x7d\x72\x73\x60\x90\x3e\x70\
\xda\x70\x86\x76\x89\x8e\x0a\x6f\x5d\x85\x33\xe4\xb3\x0e\x8d\x26\
\x16\x28\xbf\xa3\x21\x11\x40\xb7\xe6\x52\x7e\x72\x84\x21\x07\x1a\
\x8e\x58\x71\x25\xc8\x47\xc5\x57\x9c\xf1\x36\xf1\xab\x60\x96\x7d\
\xc1\x93\x90\x48\x09\xdd\x81\xa0\xce\x8a\x15\x09\x23\x4e\x38\x22\
\x0d\xb4\x8c\x9d\x29\xf4\x81\xd5\xd5\x9e\x11\xd8\x0b\x79\x56\x6e\
\xf5\xfe\x67\x0f\x19\x37\x7d\x62\xe6\x82\x89\x7d\x4f\x34\xa2\x95\
\xd8\x14\xb1\x91\xd8\xa1\x89\x84\x56\x60\x98\x30\xc1\x62\xc9\x18\
\xd1\xd2\xd2\x3d\xf3\x0d\x47\xcf\x6f\xe9\x18\x61\x2c\xf4\x81\xac\
\x27\xf5\xf9\xca\xe2\x88\x31\x69\xe8\x88\x0e\x9f\xe9\xae\x05\xa0\
\x0f\x28\x86\x87\x94\x82\x3e\x03\x75\x83\x7c\x43\x4b\xb7\xb5\xaf\
\x78\x6c\xc8\x3c\x34\x31\x56\x12\xa8\xfe\xc4\x59\x20\x97\xae\x4a\
\x6f\xaa\x9c\x8e\x11\xb7\x39\x63\xc1\x8c\x9b\x1c\x93\x85\xcb\xf3\
\x74\xdc\x78\x01\x84\x4a\xcb\x97\x76\x18\x22\xb1\xbf\xff\x41\x19\
\x53\xa6\xa4\x20\x8c\xb7\xdd\xdb\xfa\x6e\x66\xb4\x1b\x98\xf9\x59\
\x6d\x5a\xff\xfc\x87\x0c\x6c\x2d\xdc\x19\x9f\xb2\xfb\x9a\xb6\xbf\
\x62\x16\x18\xd3\x50\xd2\x06\xbf\x34\xa7\x64\x4e\x4a\xc9\x19\x96\
\x39\x6f\x09\x41\xf1\x15\xf1\x08\x27\x00\x47\x2d\x66\xf0\x07\xd9\
\x60\x88\xc8\x98\x30\x22\x96\x00\xe9\x5f\xaa\x93\xec\x61\x68\x8e\
\xa1\x83\xd4\x07\x3c\x26\x35\x68\x2e\xab\x3d\x6a\x8f\x0b\x27\x03\
\x81\x8f\x11\xc2\xb2\x22\x96\xb3\xd1\xc9\x75\x00\x25\x3c\xba\x3a\
\x5c\xcc\x88\x88\x39\x9a\x05\x4f\xe8\x24\x2b\x1e\x46\xaf\x97\x8e\
\x1a\xde\x0c\x95\xf8\x09\x27\x04\x1f\x43\x4c\xce\x84\x09\x29\x4e\
\x3a\x57\xfd\x5b\xe8\xa9\x3d\x7a\x60\x06\x2d\x4f\x4f\xed\x71\x59\
\xf4\xe0\x59\x5f\x47\xe5\xd8\x77\xad\x8e\x88\x2e\x54\x16\x7d\x55\
\xdb\x9f\x94\x4a\xc0\x98\xfe\xf0\x37\x28\x46\xe4\xdc\xe2\x8c\x15\
\x2d\xe7\x14\xc1\xb0\xaf\xe4\x23\x34\x09\x39\x39\x8d\x54\x0d\x11\
\x9a\x84\x98\x9c\x37\xb8\x81\xc1\x92\xa2\xa9\x49\x06\x1d\xf3\x9d\
\xb3\x53\x21\x54\xea\x01\xbd\x60\xc7\x6b\xda\xfd\x33\xf4\x26\xfe\
\xab\x5a\xc9\x23\x08\xd9\x86\x96\xef\xea\xe1\xb9\xfe\x6d\x45\x02\
\xdf\x44\x74\x81\x7a\xe6\xb0\x82\x6c\xbe\x86\xe2\x1e\x8f\x51\x94\
\xb4\x1f\xa4\x5f\x74\xa1\x5c\x7b\xc9\xbe\x86\xa5\x11\x04\x29\x66\
\x84\xa2\xc5\x11\x11\x0b\xd0\x1e\x85\xb2\xc8\x3d\x15\x1f\x86\x66\
\xe1\xa9\xa2\xeb\xba\x4b\xe0\xf6\x3a\x6c\x87\x83\x0d\x6e\xef\xcf\
\x26\x50\x09\xd8\xfb\x73\xef\x33\x1a\x34\x2d\x63\x32\x2c\x05\x8a\
\x98\x96\xf6\x0f\x4e\xbd\xa2\xb3\x74\x58\x6a\x6a\x4a\x22\xc6\xe4\
\xd2\x7f\x82\x88\x91\x74\xad\xd5\x20\xee\xef\x07\x3f\x75\x60\x98\
\x7d\x33\xec\xa2\x8b\xba\xa6\x02\x3d\x74\xb1\xea\x20\xf5\xea\x83\
\x28\x21\xec\x1a\xb9\xba\x5d\x30\x65\x4d\x2b\x4c\xef\x4a\x1a\xcc\
\x0d\x36\x44\x38\xf5\x72\x3e\xc2\x09\x40\x6e\x31\x64\x4c\xb0\x94\
\xf2\xe3\x23\x71\x8e\x96\x76\x2f\xe2\xab\x80\x4c\xee\x67\x92\x4f\
\x67\x11\xbb\xe6\xf2\xf0\x49\x0e\xa3\xca\xae\xed\xf8\x34\xb3\xd3\
\x60\x31\x82\x59\x7b\xaa\xa1\x23\x11\x9e\x96\xa6\x23\x92\x14\xdc\
\xa2\x89\x89\x49\x25\x0e\xbd\x32\x3f\xa2\x47\x8f\x8e\x38\x66\x46\
\xcb\x46\xf2\xb7\x68\x8f\x71\xab\x0f\x12\xec\xc3\x74\xfa\xe9\xf4\
\xf9\xba\xc2\x4b\x85\xb9\x9c\xde\x99\x76\xe1\x9a\xa8\x6b\xd2\x70\
\x27\xa0\xad\xa1\x45\x13\x85\x41\x98\x36\x60\xd9\x3d\x2e\x12\xc9\
\xe9\xed\xd3\x3e\xfd\xf2\x86\xf0\x00\x6b\x47\x4e\x82\xa1\xa2\x96\
\xaa\x2e\x92\x26\xec\x0e\xdb\x3c\xac\x13\xfa\x68\xc1\x41\x03\xf6\
\xba\x68\xee\x0e\x48\xc2\x87\x79\x86\x1a\x64\xaf\x87\x59\x8e\x7f\
\x1d\xad\x64\x98\x0c\x2e\x97\x12\xd2\x4a\xcc\x88\x9a\x86\xfa\x99\
\xd4\xd3\x17\x32\x44\xcc\x18\x43\x1c\x30\x4b\x27\x00\xaa\x0b\xd4\
\x73\x06\xfc\xaa\xdd\x91\x1f\xba\x4a\x37\xf8\xa8\x3b\xe0\x46\xbb\
\xa7\x38\x9b\xbb\xb3\xa1\xf6\xd0\xad\x5d\x72\xd5\x0d\x4c\x61\x88\
\x24\xae\xf4\xf1\xc0\x05\x3c\x14\x39\x11\x39\x25\x1b\xd6\x34\xa4\
\xd7\x9a\x42\xbf\x88\x21\x22\x12\x72\xb2\x10\xa5\x3d\x0f\xd2\x06\
\xb0\xcd\xc9\x8b\x50\x7b\x6f\xfa\xb0\xbe\xd8\xb5\x93\xdd\x35\xc3\
\x47\x6e\x10\xf8\x5e\x84\xda\xa6\x0e\x9c\xa8\x1e\x94\x7b\xc3\xbc\
\xc5\xd2\xd1\x08\xbe\xbd\x66\x43\xf3\x01\x2f\x8b\x50\x59\xf9\x66\
\x11\x63\x26\x6c\x89\x85\x1a\x94\x61\xc8\xc9\xc9\x48\x42\xc6\xa7\
\xc2\xad\x1b\xbe\xf1\xc3\xcc\x80\x01\x5b\x46\xc9\xed\x77\x52\x4e\
\x69\x6a\x31\x96\x1e\x9c\x10\x04\x01\xdf\x75\x43\xfa\xf6\x90\x95\
\x9c\xa1\xa5\x95\xd7\x35\x24\xb0\x3b\x69\x34\x3b\xc9\x83\x23\x62\
\xe6\x38\xd6\x54\x7f\x98\xa8\x57\xa2\x17\x7a\x32\x4e\x42\x41\x23\
\x00\x98\x96\xbe\xb6\x13\x94\x48\xbd\xf0\xac\x8c\x3b\xf0\x08\x2e\
\x14\x52\xee\x9a\xab\x72\xf8\xa7\x1e\xac\x77\x21\xc7\xf0\x81\xbd\
\x0e\xfd\x8b\xdd\xd5\xda\x65\x90\x56\x00\x1e\x2b\xae\xb3\x09\x90\
\xe0\x4b\x1a\x42\x13\x5f\x66\xc7\x09\x4a\x9c\xa4\x15\x42\xb1\x11\
\x0c\xc9\xed\x11\x3b\xae\x4b\x8f\x87\x6f\x69\x3f\x3f\x19\x5e\x14\
\xb7\x67\x9e\x43\x43\x38\x71\x96\x36\x54\x16\x86\x8e\x8e\x5a\x5a\
\xd1\x5a\x20\xba\x9d\xd7\x70\x72\xea\x5a\xaa\x60\x8a\x56\x98\x57\
\xd7\x3d\x3a\xfd\xdd\x4f\xd0\x01\xd1\x47\x89\xdc\xb5\x96\x46\x0a\
\x2e\x35\x78\x36\xfb\x07\xdf\x1d\x20\x52\xd7\x99\xc1\xed\x99\xc0\
\x0e\x00\x3b\x2b\x7f\xe3\xa4\x1e\xd9\x37\x95\x0b\xe0\x1d\x82\x88\
\x3a\x5a\x5a\xc1\x26\x5c\x38\x2b\x2a\xb4\x8f\x9d\x34\x96\x5b\xb9\
\x4c\xdd\x33\xc9\xa7\x2f\xc0\x8f\xd0\x5f\x68\x2c\x15\x48\xac\xde\
\xa5\x39\x1c\x60\x58\xea\xa9\x13\xa5\x9e\x82\x67\x0e\xcd\xe2\x0e\
\x80\xfd\xe1\xef\xd5\x33\xda\x42\x04\x3a\xba\x11\x6c\x42\x07\xcf\
\x42\xc0\xcc\x3b\xb4\xf4\x48\x9c\x34\xa1\xf4\x5e\xd7\xed\x15\x08\
\xa7\x96\x86\x92\x48\xea\xff\x58\x52\x29\xbd\x97\x33\xee\xa7\xad\
\xea\x19\x66\xd8\x9f\xad\xea\x61\x3a\x75\x70\x1d\xd8\xab\x36\x86\
\x95\xcb\xae\x9d\xe3\x24\x9b\xcc\xa5\xdf\x89\x10\x0d\xf5\xe0\xea\
\xf5\xb5\xb3\x91\xb7\xaf\xbf\x83\x97\xfd\x42\xe0\x6d\x27\xd8\x4f\
\x0b\xc2\x80\x8f\x9e\x42\x1d\x9e\x9e\xa1\x52\xd7\xfc\x59\xed\xa5\
\xd6\x7d\x6c\xb2\x52\x4a\xbb\x41\x0e\xa1\xf6\xfa\x60\x9e\xff\xa0\
\x03\x7c\x67\xe5\x6d\xfb\x8e\x67\x89\xa5\x19\x44\xb0\x5d\x33\xc0\
\x8a\x89\x7a\x44\xbd\x27\x25\xbd\xa2\x21\x7a\x27\xd5\x84\xe7\x12\
\x89\x85\xf5\x53\x9e\xe0\xe9\xa2\x8a\xa7\x4e\x89\xdb\xcb\x00\x86\
\x39\x81\xdb\x23\xa8\xec\x17\x6d\x36\x54\xa8\x7d\x8f\xb3\xef\x7c\
\x6a\x2c\xed\x00\x96\x55\x03\xe7\xb7\xc3\x50\x23\x39\x1f\xf6\x65\
\x33\x4b\x1b\x92\x14\x4b\xc3\x48\x6a\xce\x58\x7c\x71\x2c\x63\x47\
\x5d\xc8\x37\xfc\x8b\xea\x68\x65\xd6\x8a\x81\x2f\x51\x32\xdf\xed\
\xf9\x8f\x56\x2e\x96\x0d\x1f\x69\xa8\xa9\x42\xfe\x21\x4e\x5a\x7a\
\x97\x4a\xee\x7f\x47\x23\x90\xa0\x05\x19\x93\xf6\xf0\x40\x42\x4b\
\x23\x8e\x3c\x06\x69\x15\x23\x7c\x3d\x23\x8c\xfe\x29\x2d\x15\x6b\
\x21\xa8\xbc\x34\x9c\xef\xe8\xee\xfa\x16\x4a\x27\x2d\x5d\x3d\x80\
\xdc\x87\x35\x6a\x86\x13\x9a\xb8\x91\x82\x57\x87\xca\xd0\xec\xc1\
\x76\xfe\x2c\xb5\xa1\xe5\xdb\x33\x6a\x86\x10\xcd\x2e\x67\xd0\xf2\
\xff\x48\x60\x39\x23\x28\x59\x29\x83\xb6\xbe\xac\x8a\x83\x63\x74\
\x12\x23\x08\xdf\xad\x67\x62\x9b\x80\xbf\xaa\x97\xcd\x23\x1c\x96\
\xf6\x77\x95\xb4\xe1\x3d\x21\xa3\x0b\x17\x64\x97\x52\xfb\xe3\xd6\
\xca\x33\x89\xe8\x84\x14\xe0\x06\x35\x43\x2c\x2f\xc7\x0e\xfc\x87\
\x09\xc7\x37\x92\xd6\x9d\x0d\x9f\x5f\x87\xf8\xa1\xa4\x73\xa6\x06\
\x6e\xd6\x9b\x3c\x09\x0d\x67\x2d\x6f\xbd\x1b\xc4\x1d\x1b\xfc\x89\
\x11\xfc\x3a\x7a\xf5\x32\xdc\xe1\x7e\xe9\x6f\x67\x4a\x4c\x24\xbd\
\x81\xec\xe0\x9b\x3a\x2c\x6b\x10\xdf\xd1\x48\x73\x58\x87\x52\x59\
\x63\x48\x50\xd2\x1a\xea\xf1\x84\xde\x23\x34\x92\x58\x77\x83\x48\
\xaf\x42\xbe\xb8\xff\x7b\x27\xcc\x28\x2d\x20\x0c\xf4\x3a\x24\x5a\
\x4a\x70\x27\xa7\xb5\x16\x23\x28\x31\x42\x43\x3e\x98\xfe\x7b\x89\
\xab\xe1\x06\x18\x63\xc2\x58\x80\x8d\x8a\x56\xa6\xfb\x4d\xf8\x11\
\x4a\xe0\x90\x84\x14\x2d\x94\x41\x3f\xd4\xac\x85\x11\x97\x91\x50\
\x91\x30\x62\x2c\x06\x88\x04\x69\xd6\x74\x94\x01\x05\xb7\x83\xe8\
\x61\x07\xa9\xb4\x15\xb2\x80\x91\xb3\x86\xd0\xd4\x7d\xc7\x2b\x0e\
\x4d\x67\xa4\x27\xee\x06\x73\x3e\x04\x94\xdb\x8f\x3e\xb8\x97\x47\
\xa8\x76\x9f\x94\xc9\x45\xf0\xe0\x57\x4a\x4e\x26\x13\x17\x36\x70\
\xe2\x33\xe1\xc1\x9e\xf1\x90\x2b\xc1\xb1\x7a\xb7\x97\x92\xf2\x90\
\x11\x73\x66\x44\x92\xa6\xcf\x88\x99\x32\x16\x27\xaa\x07\x59\x9f\
\xdb\x8b\x19\xfb\x25\x7b\x2b\x97\xae\x93\x39\xd0\x46\x1e\x94\xa1\
\x0c\xaf\xb7\xa3\x91\xec\x62\xe7\xaa\x7d\xf8\x4d\xbe\x0f\x42\x85\
\xf4\x35\x5b\x71\x5d\x09\x53\xa6\x8c\x30\x02\xec\x23\x2e\x71\x8d\
\xa3\xe4\x82\x6f\x79\x42\x25\x2d\x7c\x3f\x01\xe8\x69\x5e\x6b\x12\
\xc6\x8c\x85\xd5\x92\x73\x87\x9c\xdb\xdc\xc4\x50\xa1\x19\xc9\x74\
\xe7\x21\xb6\xb9\x03\x64\x94\x70\x7d\x15\x9a\xad\x0c\xcb\xf8\x41\
\x05\x3f\xf5\xd3\x08\xc8\xbf\x33\x87\x0d\xd7\xc4\xb3\x26\x22\x61\
\x7c\x5e\x97\xa9\x3e\xc7\x59\x0e\x59\x6f\x9d\x1c\xe6\x8c\x31\xb9\
\xb8\x2e\x1b\xfa\x97\x8a\x7b\x14\x5c\xf0\x80\x73\x6a\x91\xc5\xf0\
\xdc\x99\xad\x30\xac\x33\x1a\x61\x36\x39\x12\xc6\x28\x46\xe2\xea\
\xae\xe8\x98\x30\x93\x2e\x6b\x2a\xa0\xb0\x1d\xe4\x95\x08\x3b\x6e\
\x29\xfd\x95\x25\x0f\x78\x80\xc2\xb2\x65\x0b\x8c\x19\x71\x84\x21\
\x13\x98\x06\x14\xd5\x20\x87\xf0\x45\x5a\x14\x62\xde\x77\x46\x0d\
\x7b\x00\x91\x3a\xc9\xd9\x7b\xbe\x41\x24\x93\x38\x53\x32\x0c\x11\
\x0d\x2d\x39\x9a\x9a\x98\x96\x3f\x73\x9f\x0b\x2e\xa8\x88\x39\xe6\
\x84\x09\xb0\xa5\xa1\xe1\xb1\xd0\x4a\x52\x20\x65\x4a\xc1\x9a\x88\
\x91\x50\xda\xad\x28\x44\x4c\x79\x44\xc6\x9c\x13\xe1\xca\x38\x1c\
\x35\x29\x46\x98\x96\x25\x9a\x15\x8f\x59\x12\x63\x38\xe3\x01\x8f\
\x68\x65\x84\xa5\x22\x61\xca\x9c\x88\x29\xb7\x38\x0a\x19\x8a\x1b\
\xcc\x00\x2b\xa6\xd4\x24\xcc\x07\x19\xed\x33\x0c\xa1\x0f\xda\x7d\
\x5a\x32\xb1\x86\x3a\x38\x9d\x28\x24\x52\x9e\x23\xe1\x7f\xe0\x92\
\x4b\x1e\x00\x09\x33\x12\x4e\xb8\xc9\x94\x94\x8a\x9a\x2d\x0d\x6f\
\xf0\x33\x1e\xf2\x84\x2b\x2c\x8a\x8c\x09\x29\x96\x25\xaf\xb1\xe2\
\x42\x32\xbe\x9a\x05\x05\x73\x8c\x74\xaf\xad\xa4\xf0\xc3\xb3\xb9\
\xe2\x8c\x6f\xb9\x40\xa1\x59\x71\x8e\x9f\x2a\xae\xa8\x28\xd8\xb0\
\x62\x49\xc4\x98\x25\x13\x52\x72\xc6\x02\xf4\x37\x32\xc6\xc0\xc1\
\x95\x78\xc1\xab\xe1\x64\x30\xa1\x67\x4f\x1b\x0c\x85\x70\x99\x12\
\xe9\x80\x1a\x8c\xf4\x1b\xcf\xf9\x92\x0b\x12\x22\x6e\x72\x83\x5b\
\xcc\x89\xe8\x18\x13\x51\x53\x11\xa1\x79\xc4\x3d\xbe\xe2\x9c\x73\
\x96\xdc\xe0\x98\x88\x96\x35\x25\x31\x25\x11\x25\x15\x0d\x6b\x32\
\x89\x23\x3b\x6a\x88\x0d\xdd\xae\x96\x15\x57\x3c\xe1\x02\x45\xc4\
\x96\x0d\x2d\x75\x68\x36\x55\x54\x6c\x88\x18\xb3\x25\x23\x63\xc2\
\x94\x19\x63\x22\x0a\xac\xc8\xf4\xc8\x49\xbf\xd4\xcf\x33\x84\x3d\
\xf8\xb0\xc5\x12\xe3\x58\xf3\x2d\x8f\x50\x4c\x25\x06\x18\xa9\x3f\
\x95\xcc\xdf\x44\xac\x79\x02\x8c\x18\x73\xc2\x9c\x29\x09\x2d\x2d\
\x39\x19\x33\xe9\x2c\xdc\x62\xc4\x31\x0f\xf9\x9a\x2b\x20\x66\x44\
\xc7\xa5\x38\xcf\x86\x0e\x4b\x4c\x4a\x4e\x8e\xc1\x49\xc1\xdc\xe7\
\x14\x25\x86\x8e\x82\x0b\xae\x28\xe8\x30\xc2\x74\xd8\xd2\xe2\x48\
\x98\x30\x62\x43\xc9\x45\x80\xf2\x4b\x0a\x56\xb4\x18\x26\x44\x74\
\x32\xf3\x23\xc8\xea\xbd\x57\xe0\x59\x6a\x14\x9b\x0f\xbe\xe4\x3f\
\xb3\x21\xe5\xef\x24\x19\x76\x18\x91\x3c\xb1\x42\x51\xb7\x8c\x19\
\xf1\x2e\x53\xc6\x92\x3c\xf9\x63\xbd\x95\xb4\x67\x4b\xcb\x94\xb7\
\xb9\xc3\xbb\x3c\xa1\x20\x26\xc7\x90\x01\x19\x29\x29\x96\x12\xc8\
\x18\x13\x61\x69\x42\xea\x6d\x05\x95\x74\xb4\x6c\x79\xc0\x05\x25\
\x0e\x47\xc5\x8a\xb5\x38\xc3\x58\x94\x03\x1c\x09\x0d\x05\x2d\x39\
\x29\x85\x24\xe4\xa7\x81\x75\xe9\x06\x88\x85\xbb\x06\x3d\x89\x9e\
\xcd\x60\xf0\xb5\x44\xf9\x87\x07\x14\xcc\x98\x85\x32\xa6\xc7\x05\
\x3c\x63\xc5\x51\x11\xf1\x3a\x13\x6e\x93\xd1\xab\x02\x69\x34\xf7\
\xb9\xc2\x72\x44\xca\x9a\x95\xc0\xea\x0d\x15\x6b\x62\xa6\xdc\xe6\
\x0e\x17\x6c\x51\x8c\x99\xb0\xa1\x20\x66\x44\x1a\xaa\x12\x8f\x2d\
\x69\x29\xb5\x6a\xd6\xac\xd8\x50\xe1\xe8\xd8\xca\xb8\x8b\x21\xc2\
\x90\x91\x32\x25\xc6\xb1\x60\x23\x53\x1f\x09\x96\x15\x1a\xc3\x0d\
\xc6\xa4\xc4\xe4\x92\x6a\xeb\x2f\x9e\xdb\xd7\x50\x21\x43\xf7\xe5\
\x4e\x84\x23\xa6\xa1\x66\xc4\x2d\x92\x41\x72\xbc\xcb\xe2\x13\x5a\
\x0a\x62\x6e\x93\x4a\x89\x5b\x62\xa9\x58\x71\xc1\x5f\x31\x32\x85\
\xb3\x61\xc1\x1a\x45\xc9\x96\x0d\x2b\x14\x5b\x1c\xb7\xc8\x88\x48\
\x49\xe8\x18\x91\x90\x92\x90\x49\xc7\xbd\x11\xd1\x04\x23\x18\x75\
\xc1\x82\x9a\x92\x42\xc2\xa8\x93\x6e\x9b\xcf\x6f\x53\x52\xa6\xcc\
\xb9\xe0\x4a\x26\x02\x7d\xa2\x7f\x85\x61\x4a\x42\x4a\xc6\x44\x46\
\x2c\xcd\x6f\x9e\xcb\x8f\xd8\x4d\x66\x74\x1f\xb7\x9f\xba\x63\xd0\
\xff\xc8\xaf\x2a\xd6\x94\x5c\xb0\xe1\x17\xd2\x6d\x54\x82\x05\x68\
\x2c\x29\x50\xc9\xd8\x51\x19\x22\xff\x86\xff\xce\x1f\x89\xf9\x19\
\xef\xa0\xd9\x92\x32\xa2\x65\xc4\x4a\x38\xb4\x4f\x58\xf1\x80\xbf\
\xe7\x94\x3b\x24\xac\x29\xd1\x4c\xa5\x70\xf2\x67\xae\xa1\x16\x16\
\x54\x4b\x4d\xc4\x86\x25\x35\x5b\x36\x38\x69\xef\x25\xe4\xa4\x18\
\x1c\x89\xd4\x3d\xaf\x71\xc2\x4a\xc6\x6a\x1a\xa0\xa5\x44\x73\x2a\
\x7a\x88\x86\x38\xd0\x0a\xae\xcd\x9a\x56\x7b\xb5\x45\xc7\xe6\x77\
\x97\x1f\x16\x64\x22\x7b\x51\xb1\xe4\x9c\x87\x3c\xe6\x8a\x88\x92\
\x94\xb7\x79\x87\x09\x1d\x25\xb7\x68\x29\x28\x24\xc5\xee\x98\x70\
\xc5\x25\x15\x2b\x96\x18\x66\xac\x79\x8c\xa5\xe0\x3e\x1d\x13\x2a\
\x0c\x39\x9a\x86\x94\x9c\x2d\x4f\x58\xf3\x3a\x7f\xcf\xfb\x4c\xa8\
\x29\x69\x49\x31\xcc\x49\xa8\xe8\x50\xd4\x74\x94\x94\xf2\xaa\x56\
\xac\xf9\x0b\x97\xac\x64\x22\x50\x93\x70\x2c\x95\x8b\x67\xea\x67\
\x52\x5e\x95\xac\x59\xb0\x16\xbd\x02\xc3\x1b\xfc\x8c\xb7\xb9\xcd\
\x9c\x12\xcb\x7b\xfc\x44\xe6\xfd\xbe\xf3\x44\x28\x1c\xd5\x87\x97\
\xac\x99\x93\x48\x17\x23\x62\x4a\x43\xc1\x96\x0c\x87\x0a\x48\x76\
\xce\x7f\xe6\x92\x9c\xbb\xcc\x58\x70\x4e\x4b\xce\x96\x9a\x8a\x25\
\x0b\x52\x12\x0a\x19\x59\x79\x8d\x11\x8a\x07\x94\x6c\x04\x72\xdb\
\xa2\x39\x22\xa6\xe2\x4f\x6c\x79\x9b\x39\xd0\x31\x95\x21\xc7\xad\
\xa8\x0c\x2c\x29\x59\xd3\x01\x15\x57\x2c\xd8\x50\x50\x86\xa6\x70\
\xcb\x8a\x2d\x17\xa1\x24\x1c\x31\xc1\x90\x30\xe7\x06\x57\x3c\x94\
\xc1\x5c\x9f\xd9\x5a\x22\x22\xe2\x50\xca\x3f\x07\xc5\xf6\xb7\x7e\
\xcb\x05\x5b\x92\xe0\x05\x22\x26\x58\x19\x23\x32\xe4\x8c\x88\x25\
\x65\xfd\x67\xbe\xe6\x94\xd7\x98\x73\x9f\x7f\xa2\xa6\x21\x16\x81\
\x9c\x9a\x04\x23\x91\x7b\xca\x84\x88\x88\x1c\xcb\x25\x5b\x5a\xb6\
\x14\x4c\x78\x8d\x9c\x4b\x1e\xb3\xc5\xf2\x1e\x39\xd0\x4a\x92\xa6\
\x45\x84\xab\x95\x72\xbe\x65\xc3\x39\x0b\x39\xf4\x2e\x10\x83\x5a\
\x09\xe4\x86\x96\x8d\x50\xd6\xa7\xdc\x60\xc6\x84\x63\x0c\x4a\x26\
\x00\x17\x94\x81\xec\x94\xbe\x48\xd1\xe5\xa4\x6e\xeb\x24\xdb\x6b\
\xc4\x21\x41\x82\xa1\x65\x45\xce\x31\xb7\x39\x92\x2a\xf4\x26\x17\
\xc4\xb4\xac\x78\xc8\xd7\x14\x3c\x61\xc6\xeb\xbc\xce\x58\x34\x29\
\x6f\xb0\x66\x45\xc3\x25\x17\x1c\xf1\x2e\x2b\x62\x26\x52\x29\x1a\
\x0a\x72\x3a\x2c\x05\xff\x42\xcd\x6b\x8c\xf8\x0b\x8a\x53\x6e\x31\
\xc7\xb2\xe0\xb1\x0c\xb7\x16\x5c\x72\xc1\x25\x1b\x56\x22\xed\xd2\
\x77\x59\x2a\xb4\x88\xed\x28\x5a\x6a\x2e\x88\xb8\x62\xc1\x8c\x0c\
\x45\xc6\x0d\x2a\x0c\x5b\x0a\xb6\x01\x40\x4c\x06\x03\x34\xcf\x34\
\x84\x01\xba\x8f\x3b\x62\x52\x41\x7e\x12\xf9\xb0\x2f\x85\x16\x4c\
\x99\x32\x23\x13\x85\xb0\x7f\xc7\x6d\x3a\x22\x0a\x72\x6e\xb3\x66\
\x49\xc6\x88\x31\x13\x5a\x36\xac\xf9\x13\x6b\x9e\x70\x49\x89\x25\
\x61\x44\xce\x88\x23\xe6\x8c\x48\x29\x58\x90\x30\xc7\xf0\x17\xfe\
\xc4\xd7\xbc\xc3\x1b\x7c\x8b\xe1\x27\x44\xdc\xc2\x72\xc1\x1f\x69\
\x80\x9a\x4b\x1e\x73\x49\x85\x65\x45\x3c\x28\xc8\x3a\x2a\x1c\x15\
\x35\x63\x46\xc4\xc4\x62\xe0\x92\x52\x38\x5e\x13\x20\xa7\x13\xd6\
\xb7\xa2\x93\xb9\xb2\xe7\x1a\x42\x61\xe9\x3e\xb6\xc4\x21\x0f\xeb\
\xe1\x8e\x94\x31\x13\x2a\x72\x32\x01\x4f\x62\x52\x32\x46\x6c\xa9\
\x69\xb8\x43\xc6\x8a\x88\x09\xc7\xe4\xd4\x5c\x70\xc6\x9a\x25\x37\
\xb8\x62\xcb\x98\x77\xd8\x70\x9f\x0d\x11\x67\x4c\x38\xe5\x94\x9c\
\x88\x96\x11\x29\x96\x35\x97\x54\x9c\xf3\x57\x14\x0f\xd8\xf0\x73\
\x12\xfe\x85\x7b\x3c\x14\x91\xcf\x15\x8d\x88\xae\x25\x8c\x49\x50\
\x74\xd4\xd4\xa4\x44\xe4\x4c\xc8\x50\x54\xd4\x42\x5d\x69\xa4\x4b\
\x1b\x09\xf0\x3b\x22\x97\xaa\xb9\x44\x13\xfd\x5e\x89\x54\x97\x7d\
\x76\x42\xe5\x70\xd8\x5f\x76\xa2\xfb\x93\xc9\x13\x81\x9c\x88\x88\
\x4e\x9e\x87\xcf\xcf\x0c\x29\x1b\x72\x34\x1b\x19\x7c\x4e\xb9\xc9\
\x94\x89\xa4\x3b\x05\x35\x53\x6e\xb0\x22\x67\xc6\x98\x25\x25\x77\
\xa8\xe4\xc7\x97\x68\x52\x5a\x5a\x22\x6e\x93\xb0\x61\xc3\x7d\x2c\
\x8a\x96\x8a\x02\xc3\x88\x13\xd6\xac\x58\x70\xc5\x56\x72\xd9\x58\
\x0e\xb7\x17\xee\xeb\x64\xc2\x37\x25\xa2\x1d\x00\xc6\x95\x8c\xee\
\x5b\x16\x18\x32\xc6\x4c\xa4\xf3\xd1\xa1\x89\x3e\x7a\x01\x67\xd9\
\xc9\x29\x50\xc2\x7f\xa8\x68\xa9\x65\xc2\xb2\x11\x18\x3c\x21\x96\
\x9a\x73\x87\x09\x77\x34\x94\xa1\xfd\xae\x88\xe4\x49\xdf\x20\xe2\
\x82\x19\x35\xff\x85\xc7\xd4\x9c\x02\xc7\xfc\x1d\x47\x9c\xb3\x20\
\x66\x8e\x25\xe3\xe7\x4c\x58\xf2\x9f\x38\xe7\xe7\x18\xee\xf2\x6f\
\x79\x8b\x31\x6f\x70\xc2\x4f\xb8\xc7\xff\xcb\x15\xad\x74\x30\x62\
\x20\x23\x66\xc4\x94\x09\x09\xf7\x25\xcd\x8a\x98\x73\x83\x19\x1b\
\x0a\x89\x32\x9e\x1f\x5c\x8a\x50\x57\x4a\x4b\x21\x28\xbc\x79\x91\
\x4e\x57\x24\xa1\x31\x93\x52\x65\x1c\xb4\x80\x7a\x1a\x48\x1c\xf0\
\x49\x3f\x22\xe0\x85\x51\x5a\xf9\xd1\x33\xe1\x6c\x6f\xf1\x3a\x43\
\xc7\xb4\x9c\xf2\x0d\x97\x4c\x99\xb3\xe5\x26\x11\xb7\x79\x9b\x29\
\x29\x17\x38\x32\x1a\x3a\x8e\x78\x8f\x35\xff\x89\x07\xfc\x8c\x11\
\xb7\xb9\xcb\x9b\x58\x52\xa6\x5c\x32\xe2\x4b\x62\x4e\x59\xb2\x41\
\xf3\x96\x0c\xdb\x26\x94\x7c\xcb\x13\x96\x24\xc2\xf6\xcb\xc8\x99\
\x32\xc2\x89\x4e\xae\x57\x4f\xb6\x02\x17\x68\x26\xe4\xc0\x4a\x1e\
\x9a\x0b\x3d\x97\x67\x76\xc3\x55\x38\x82\xbd\x6c\xa2\x15\x3a\x96\
\xd7\x14\x8d\xa5\x35\x67\x03\xbd\xc3\x08\x09\x23\x26\xe5\x88\x63\
\xe6\x4c\x98\x32\x27\xa5\x63\x8d\xe1\x36\x6f\x71\x4c\x42\xc6\x31\
\x25\x31\x6f\xf0\x3e\xaf\x73\x44\x8a\x61\xc4\x5b\x18\x6a\x4e\x19\
\xd3\x31\x67\x45\xc4\x11\x59\x98\xdd\x54\xe4\xcc\x19\x49\x4f\x2d\
\x62\x84\x62\xcb\x05\x17\x6c\xb0\x32\x79\x3a\x95\x57\x51\xb3\xa0\
\xa2\xa1\x0a\x59\xe5\x5a\x06\x16\x3c\xbc\x57\xb2\xe6\xbf\x73\x49\
\xf7\xa9\x83\x01\x0d\xe5\x19\xd5\xa7\xc7\x12\xdb\xc0\xb7\x4e\xc8\
\xa4\xe4\xed\x30\x64\x8c\x64\xb6\xd2\x05\xaa\x71\xff\xdf\x11\xad\
\x8c\xcb\xfb\x03\xb8\xa6\x22\x65\xc4\x9c\x39\xef\xb1\x02\x34\x5b\
\x8e\x78\x8f\x9c\x0a\xc5\x11\x29\xf0\x3a\x57\x34\xcc\x59\xa0\x78\
\x0b\xc3\x4d\x5e\x93\xe2\xc8\x8a\xa0\xeb\x11\x89\x88\xbc\x65\xa4\
\x94\xd4\x64\x1c\x73\xcc\x39\x05\x47\xcc\xb9\xa4\x12\x44\x2b\x21\
\xa6\xa3\x10\x0d\x23\x4d\x21\xe7\xe1\x8a\x11\x11\x73\x52\x2c\x0d\
\xab\x4f\x2e\x3e\xd9\x72\xc4\x0d\x15\x7f\x97\x8f\xb0\xe8\x7f\x4c\
\x7f\x69\x24\xe0\x78\xe0\x7d\x2b\x3e\xba\x63\xca\x28\x08\x66\xea\
\x40\xf3\xf1\x0d\x39\x3f\xc2\x3e\x46\x93\xca\x41\x2b\xb8\x8d\x22\
\xe1\x06\xef\xb1\x66\x45\x29\x99\xdd\x92\x15\x25\x23\x8e\x29\x25\
\xc9\x69\x68\x38\x26\x67\xc2\xcf\x79\x8d\x34\x50\xd5\x62\x66\xdc\
\x26\xa7\xa6\x25\x62\x4a\xcd\x9a\x96\x1b\xfc\x3d\x6f\xf0\x47\x2e\
\x69\x58\xb1\xc5\x91\x92\x61\x98\x73\x83\x35\x4f\xd8\x02\x33\x14\
\x1d\x25\x8e\x9a\x25\x86\x8a\x8a\x29\x1b\x2e\x79\xc4\x13\x9e\xf0\
\x36\xa9\x3b\x51\xee\xd9\xe1\x53\x13\xff\x2a\x76\x36\xa4\xd1\xbe\
\xd5\x87\x94\x2d\x99\x88\x27\x32\x50\xb4\x55\x01\xaa\xc9\x59\x09\
\x0c\x97\xd1\xe1\x18\x91\x0a\xc2\xe5\xb9\xbb\x8d\xa8\x3a\x24\x52\
\x53\xae\x98\xb1\x60\xcc\x11\x4f\x28\xf9\x09\x0b\x7e\xca\xfb\x4c\
\x84\x13\xe9\x95\x28\x46\xbc\xcb\x1b\x3c\xa0\x22\x25\x13\x24\xc4\
\xa0\x30\x9c\xf0\x36\x5f\xb2\x02\x66\x9c\x10\xe3\xc8\xb8\x49\x4a\
\x25\x91\xc9\x49\x3b\xd0\x33\x3b\x1e\xb0\x25\xa6\xe2\x18\x45\xc1\
\x8a\x95\x0c\x70\x3e\x13\xb3\xec\x59\x28\x5e\x83\x70\x4d\xc5\x16\
\xcb\x14\x2d\xe5\xae\x09\x83\x08\x43\x42\xb0\x95\xce\xe3\x29\x8e\
\x25\x23\xd2\xa0\x60\x3b\x22\xc7\xb0\xa6\x26\x65\x42\x4d\x41\x1b\
\x84\x7c\x6b\xa0\xe0\x98\x39\x67\x74\xbc\xcf\x92\x9f\x33\x17\x08\
\x30\xc2\x91\x70\x8e\x66\xc6\x5b\xfc\x15\x87\xa2\x91\xeb\xa4\xf8\
\x9a\x86\x23\x7e\x41\xce\x7d\xb9\xb4\x8e\x29\x27\xc4\x8c\xb8\x45\
\x4a\x11\xf8\x94\x29\x13\x72\x6a\x12\x46\xc4\x14\x6c\xf8\x26\xf4\
\x46\xdd\x77\x23\x54\x3e\xcd\xf6\xdd\x44\x0f\x7f\xa4\x4c\x44\x04\
\xab\x0b\xf4\x9e\xbe\x17\xa9\x03\x2f\xd7\x90\xf0\x0b\xfe\xcc\x9a\
\x96\xa9\xa0\xdd\x0d\x19\x19\x8e\x9c\x44\x78\xd3\x1b\xd6\xb4\x58\
\x81\xe4\x96\x24\x4c\x50\x54\x18\x8e\x38\xe1\x88\x48\x5c\x71\x4d\
\x49\x4d\x4c\x49\xc3\x88\xdb\x68\x0a\x0a\xa9\x14\x12\x4a\x56\xdc\
\xe0\x94\x8c\x8c\x19\x6b\x16\x64\xbc\xcd\x11\x8d\x38\xdb\x25\x57\
\x62\x4e\xcd\x88\x63\x62\x26\xdc\xe6\x04\xc5\x96\x25\x35\xb3\xbd\
\xf9\xe1\x6b\x13\xaa\x2e\x70\x27\x55\xa0\x67\x29\x99\x90\xea\x02\
\x53\xc9\x86\xa4\x6a\x9f\x5a\xfa\x53\x56\x2c\x69\xc3\x90\xe3\x09\
\x9a\x96\x8e\x94\x88\x8a\x25\x05\x8e\x94\x51\x38\xb8\xdf\xf2\x06\
\x89\x74\x28\x15\x37\xe4\xda\xf9\xe2\x6b\xc3\x86\x63\x1e\x53\x01\
\x73\x62\x96\x3c\xa1\x40\x91\x4a\x16\x61\x59\xd1\x30\xe3\x16\x15\
\x5f\x93\x71\x47\x94\xf0\x0c\x39\x25\x8a\x44\x86\x2f\x2d\x8a\x84\
\x39\x6f\xf3\x36\x31\x5f\x72\x0e\xcc\x99\x5f\x33\xe2\xb6\x67\x88\
\x9e\x5d\x5b\xcb\xb7\x84\x96\x84\x1c\x45\x2d\x9d\x22\x85\x15\xe2\
\xd0\x8e\xb2\xd3\xf7\x13\x97\xbc\x4b\xc2\x9f\xb8\xcf\xbb\x9c\xd2\
\xb0\x24\x92\xf9\xbb\x2d\x2d\x1a\xc7\x98\x4c\x5a\x3a\x1d\x5b\x5e\
\x47\x53\x92\x31\x63\xcc\x48\x8c\xa7\x02\xfb\x21\x91\x56\x8e\x62\
\x42\x22\xd5\xa6\xa3\x65\x4d\x42\x46\x4c\x8d\x16\xbd\x02\x47\xc5\
\x23\x22\x2e\xa4\x8d\xec\xd5\x6e\xc0\x31\x21\x27\x23\xa7\xe2\x09\
\x27\xe4\xc4\xdc\x62\x8b\x61\xc6\xe4\xfd\xf6\xc0\x18\x7b\x3e\xa2\
\x07\xcf\xbd\x3f\x68\xc8\x38\x22\x13\xe1\x94\x72\x8f\x4b\x7b\x5d\
\x3f\xb9\xc6\x31\xe3\x5d\xce\x78\xc4\x82\x11\x33\xc1\x0a\x12\x69\
\xca\x7b\xca\xea\x15\x17\x94\x20\xc2\x4c\x23\x46\xa2\x2d\x60\xf6\
\xc0\x80\xbe\xfb\x19\x8b\x5a\x89\x21\x63\x83\x96\x74\xde\x1b\xad\
\x24\x21\xe7\x84\x82\x96\x9a\xf5\x80\x9e\x10\x4b\x7e\x33\x16\x35\
\xe5\x98\x11\x63\xc1\xb1\xbc\x19\xf5\x3d\xf3\xdd\x3e\xa2\xfb\x78\
\x37\xa8\xd4\x0a\xfa\x90\xd2\xd2\xb0\x0d\x33\x76\xea\x19\xed\xc1\
\x84\x9a\x09\x33\xe6\xdc\x63\x89\xe3\x98\x8a\x86\x42\xe6\xc5\x6b\
\x0c\x2d\x05\x8f\x39\x47\x73\xc4\x4c\x84\x37\x46\x64\x72\xa6\xa2\
\x01\x39\xd4\xc9\x30\xdd\x88\x31\x31\x05\x86\x54\x84\x3f\x9d\xa0\
\x14\xbd\x20\x64\xca\x84\x92\x15\xad\xe4\xb8\xad\xcc\x25\x1a\x66\
\x8c\x30\x68\x12\x22\xc6\x8c\x04\x97\xcf\x18\x93\x5d\xc3\x93\x38\
\x48\xa8\xb8\xa7\x05\x91\x8c\x49\x18\xcb\x19\xe8\x84\x98\xe3\x06\
\xbd\xa2\xa7\xf9\xd4\x09\x35\x96\x8c\x5b\xb4\x3c\xa6\xe3\x01\x05\
\x5b\xea\x30\xc8\xf0\xae\x08\x7e\x46\xcc\xb8\xc5\x4c\x44\x5b\x7b\
\xd1\x3e\x27\x1d\x8d\x5e\x86\x4f\x49\xcd\x33\x16\x71\x8d\x98\x89\
\x08\x80\x55\x6c\x88\x25\xd3\x6c\xa5\xfb\x92\x32\x62\x2d\xf5\x8f\
\xef\xa0\xc6\x72\x31\xf5\x80\xbf\xef\x55\x32\x27\x64\xd7\x50\x0c\
\xa3\xfd\xb9\x0c\xf3\x45\x8c\x96\x14\x66\x42\x8e\x15\x03\xf4\x73\
\xbe\xee\xda\x86\x99\x12\x6c\x19\x81\xe1\x66\xa2\x42\xd9\x49\xfe\
\xd0\x33\xa7\x7c\xf9\x1e\x31\x61\x12\xc6\x1c\x74\x98\xf3\xeb\x82\
\x46\x59\xaf\x03\xe1\x03\xe0\x98\x05\xe0\xb8\xc5\x5a\x4e\x85\x57\
\xa4\xf2\x3d\x95\x26\x44\xbb\x4a\x46\x5f\x53\x72\x52\x22\x36\x22\
\xd8\xd5\x33\x7b\xfc\xe4\x57\xca\x8c\xe4\x79\x4d\x60\x87\x22\xba\
\x4c\x8e\xb5\xa8\xce\x26\x82\x29\x7b\xee\xd4\x6e\xc0\x4c\x3d\x35\
\xee\xea\xbb\xcf\x39\xb0\xa5\x24\xe2\x88\x19\x5b\x22\x22\x21\x26\
\xf6\x7d\x68\x07\xa4\x22\xb5\x91\x1c\x4c\x89\x6e\xe5\x27\xec\x8f\
\xa5\x8c\x39\x92\x7e\x45\x42\x22\x6d\xc0\x9a\x0d\xb1\x34\x00\x6c\
\x60\x61\xdd\x1e\xcc\x18\x3a\x2c\x39\x63\x51\xca\xb4\xf2\xf3\x2d\
\x8e\x11\x13\x92\xff\xc0\x77\xe5\x11\x86\x9a\xee\xae\xfe\x22\xfd\
\x70\x24\xc2\xbb\x2e\x8c\x86\x20\x8a\x61\xc3\x59\xbb\xc3\x19\xd1\
\x58\xe4\xf2\xb4\xcc\x56\xa5\x72\xdf\x77\xf4\xe0\x58\x14\x0a\x3d\
\xcf\x2d\x3a\x18\x67\xd9\x5d\x89\x3e\x73\xf5\xc5\xde\x09\x4b\x96\
\x14\x22\xeb\x92\xd1\xb2\x65\x8b\x23\x97\x11\x25\x83\x62\x22\x3d\
\xf9\x4a\x48\xea\x8e\x96\x5b\x4c\x85\xea\xd4\x49\x61\x8e\x60\x2d\
\xc9\x35\x4d\x9e\x83\x06\x4f\xf3\x0f\x5e\x74\xbb\x91\x79\x28\x82\
\x97\x28\x06\xa3\xc9\xd7\x83\x1b\x49\xd0\xb6\xd7\x34\x34\xd2\x47\
\xf0\xd5\x6c\x2b\x45\x54\x24\xe9\x3a\x61\xb0\xda\x49\x2f\xc5\x86\
\xb1\x84\xa1\xac\x82\x23\x66\xcc\x8c\x0c\x43\x8d\x62\xcc\x5c\x62\
\x58\xcd\xa5\x0c\xec\xa7\x4c\x98\xa0\xd9\x08\xa1\x54\x49\xdd\x39\
\x65\x22\x17\x89\x81\xb4\x57\x32\x88\x50\xcf\xbc\x1a\x11\xe6\xf3\
\xe5\x27\x96\x09\x19\x1b\x3a\x1e\x10\x71\x2c\x7a\xb3\x06\x45\x8d\
\x22\x0d\x2c\x7d\x0e\x26\x6c\x6a\x8c\xa0\xc4\x7d\x08\x4e\x43\x48\
\xd3\xe4\xe2\x15\x62\x59\x13\xa1\xa4\xcd\xdb\x89\x6f\xb0\xc2\xaa\
\x26\xf8\x8d\x9e\x91\x9f\x30\xe7\x98\x05\x2b\xd1\xc7\x9e\x71\x07\
\x4b\xc9\x23\x1e\xf3\x95\x6c\xdb\x38\x66\xc4\x39\x15\x0d\x11\x27\
\xbc\xc9\x1b\xcc\x59\x90\xb3\xa0\x90\x86\x94\xe7\x5c\x6c\x51\x8c\
\xdf\x37\xd7\x08\x30\x1d\x14\x5d\xfa\x73\xfd\x89\x95\x09\x4f\x0f\
\xb8\xd5\x1c\xcb\x88\xa0\x67\x2c\xf9\xb7\x13\x5d\xe3\x2c\x4d\xf0\
\x33\xbd\x12\x90\x66\x28\x0d\xea\xbb\x1c\x1a\x25\xc0\x48\x2b\x14\
\x94\x4e\xbe\x4a\x0f\x58\xff\x4a\x1a\x37\x7e\x9e\x74\xcc\x8c\x63\
\xd1\xcb\x36\x54\xc4\x64\xcc\xb8\xc1\x86\x25\x2b\x2a\x1c\x6b\x36\
\xe4\x8c\xc8\x99\x71\xc4\x18\xc5\x86\x0a\x44\x92\xa7\x57\xd8\xed\
\x06\x93\x1d\xea\xbb\xae\x86\xc2\xdc\x8b\x64\x58\x29\xa1\x10\xd7\
\x67\x84\x2d\x77\xc6\x88\xa9\xbc\xdc\x4e\xc6\x8f\x87\x54\x63\x13\
\x46\x9a\x4c\x18\x91\x76\xe1\xb0\x9b\xa0\x13\xd5\xb3\x21\x1b\x5a\
\x1a\x5a\xf9\x0a\x13\x92\xfa\x66\x6f\x14\xbe\x23\x22\x61\xcc\x94\
\x0b\xd6\x54\x28\x32\x1a\x72\x66\xcc\x83\xc2\x88\x57\x63\xd6\xe4\
\xcc\x99\x93\x63\x59\x53\xa2\xa8\xe4\xf5\x67\xd2\xb2\x6e\x7c\xc2\
\x78\x97\x7b\xcf\xa5\x05\x44\xa4\xbf\xe7\x43\x43\xf4\x99\xfe\x7c\
\xf6\x95\x27\x0b\x2a\xb9\xe7\x0b\xe9\x5d\x49\xa0\x7d\x26\xe7\x4a\
\x07\xa5\x97\xdd\xfc\x84\x11\x5a\x49\x2b\xd5\x8a\x17\xdc\x23\x8c\
\x37\xf7\x1c\x5d\x2b\x24\x34\x2b\xd4\xe1\x9d\x86\x44\x44\xca\x15\
\x5b\x22\xc6\x92\x34\x79\x31\xe1\x23\x5a\x69\x19\x6e\x03\xe5\xd0\
\x3b\x4d\x2f\x3a\xef\x0d\x14\x0b\xaa\x62\x51\xa8\x7b\xd7\xcd\x19\
\x46\xfb\xdc\x6b\x43\xfa\x11\xbf\x73\x77\xd5\x3d\x7d\x6f\xfa\xeb\
\x29\xa3\xdf\xae\x24\x11\xf1\x14\xcf\x5e\x52\xd5\x8b\x9b\xb0\x97\
\x6c\xbb\x40\xcf\xd1\x81\x1d\xb9\x53\x18\xf1\x2c\x89\x3a\xa0\x05\
\x9d\x68\x8c\x45\x81\x6a\xee\x67\xc6\x62\xd9\xd5\x55\xc2\x80\x6f\
\xd9\x9f\xa8\x5a\x34\x95\xbd\x66\x9d\x92\x04\xca\xb3\xab\x67\x41\
\x18\xb4\x15\xd2\x50\x1d\x66\xba\xb4\x28\x6d\x6a\x12\xf4\x3d\x9e\
\x3f\xaf\xa1\x88\xb0\x1f\xb5\x9f\xda\x0f\x40\x7d\x11\xdf\xe3\x1e\
\x7f\xf0\x92\xae\x3a\x30\x18\x5d\x80\x73\x87\x53\xdf\xc3\x49\x60\
\x42\x5b\xc5\x89\x1c\x46\x07\xf2\x06\x1a\xc9\x1d\x2d\x91\xac\x84\
\xb0\x81\x50\xda\xd1\x89\x7a\x89\x8f\x53\x43\x53\x46\x02\xc1\x95\
\x2c\x31\x24\x24\x24\xb4\x32\xf9\xab\xf1\xa3\x56\x08\x9e\xe2\xc7\
\x96\x7c\xa9\x88\xc4\xbe\x2d\x0b\x59\x48\x60\x9e\xef\x23\xc2\x44\
\xe6\x5d\xfb\x81\xfd\x80\xe3\xee\xf7\xed\x87\xfe\x09\x1a\x26\xd4\
\x02\x89\xc6\x52\x0d\x46\x83\x45\x74\x6a\x8f\x89\xbf\x3f\xc7\xdd\
\xcf\xe2\x39\x19\x80\xe9\xf5\x08\xc7\x42\xf4\xe8\xbf\xce\x8b\xf8\
\xb8\x30\xe7\xdd\x08\x36\xe6\x6b\x8b\x02\xcb\x98\x8a\x35\xe7\xc0\
\x24\x08\x41\xea\x30\xaa\xf0\x68\x30\xaf\xe1\x85\x40\x5b\x09\xdd\
\x1d\x1d\x1b\x96\x34\x02\x15\xf1\x3c\xa2\x48\x98\xa2\xba\xeb\x8e\
\xa1\xc1\x7e\xd8\x4a\xd9\x1d\x33\xa3\x94\x9e\xe7\x88\x4c\x78\xb2\
\x6e\xf0\x66\x87\x03\x6b\x3a\x70\xe2\x94\x98\xa1\x92\xc3\x6e\xe5\
\xd8\x26\xc4\x92\x6e\xa9\x10\x33\x96\x40\x22\xac\x59\x1b\xe4\xc8\
\x5b\x2a\xb6\x2c\xb8\x92\x55\x34\x4b\xb6\xa4\x34\x83\x8c\xd4\x8a\
\xf6\xd8\x46\x7c\x91\xbf\x46\x48\x03\x49\x89\x8c\x9f\x37\x65\x1c\
\x14\xd5\x9f\x43\x14\x11\x48\xff\x57\xcd\x3f\xd8\x5f\xee\x18\xab\
\x1d\x39\x6f\x50\x49\xc9\xac\x68\x45\x7a\x47\x0d\x38\xfc\x4a\x6e\
\xb3\x15\x38\xc4\x4a\x6b\xad\x13\x55\xca\xde\x39\xf6\xfa\x75\x11\
\x0d\x95\xe8\x9e\xf6\x18\x44\x43\x21\xaa\xd9\x7e\xed\x80\x1f\x48\
\x58\xf2\x0d\xf7\x39\xa7\x25\x13\x7e\xe6\x8a\x33\x26\x28\x34\xb9\
\xcc\xff\xa8\x30\xc6\xe0\x49\xa8\xad\x40\x3c\x15\x19\x19\x23\x60\
\x43\x8d\xe5\x16\xf3\x30\xde\xaf\xbe\xdb\x47\xf8\xab\xe1\x7e\xd9\
\x4a\x3e\x80\x48\x78\xb6\x6c\x64\x6c\xc5\x1d\x0c\xb2\x33\x18\x79\
\xb4\x61\xc8\x44\x85\x3c\xd4\x49\xba\xdc\x05\xef\x1f\xe1\x68\x18\
\x03\x9a\x8a\x0d\x57\x94\x28\x51\xaf\x8d\x84\x9e\xa6\x65\x98\xf1\
\x09\xdf\xf2\x80\x2b\x11\x1a\xf6\xb3\xc8\x25\x0b\x9e\x30\x96\x82\
\xae\x7f\x43\x47\x41\xa7\xdb\x0a\xa3\xc7\xbb\xe5\x94\x18\x47\x41\
\x47\x16\x3c\xc4\x73\xc2\xe7\x21\xf3\xb6\xa7\x9c\x1b\x52\x26\x81\
\xff\x78\x38\xdb\x4d\x50\x88\xda\x5d\x2f\x7f\x5b\x1b\xc1\xb1\x7b\
\xce\xb4\xe7\x44\xe6\x44\xa2\xa1\xde\x51\x70\xc6\x9f\xf8\x8a\x2b\
\x3a\xc6\xdc\xe1\xe7\xbc\xcb\x11\x48\x73\xb9\xe6\x82\xfb\x7c\xcd\
\x19\x05\x8e\x5a\xd6\x0b\x40\xcd\x8a\x0b\x4e\xc9\x28\x84\x18\x6b\
\x80\x29\x95\x44\x8d\x5a\x94\x66\x3c\xf9\xcd\xf7\xef\x37\x28\x8e\
\x39\x21\xf9\xb5\xbe\x76\x68\x37\xba\x4e\x65\xc8\xdc\x33\xbf\x77\
\x1f\x76\x61\x8e\xd6\xff\x6d\xb2\x47\xf8\xe6\xa9\x89\xbd\xc3\xa1\
\xd5\xdd\x3c\xa7\x0f\x7a\x06\x4d\x2a\x50\xae\xc6\x70\xc1\x86\x47\
\x7c\xc9\x7f\xe5\x3e\x25\x86\x9c\x85\x3c\xf3\x84\x42\xf4\xf0\xae\
\x38\xe3\x9c\xc7\x54\x68\x0c\x95\x68\xe9\xf7\x8b\xaa\x52\x72\xf9\
\x0a\x17\x76\x7a\xb5\xb2\x58\x62\x37\xa5\x9e\xc8\xfe\xd0\x13\xee\
\x70\x93\xe4\x73\xf5\x22\x3e\xa2\x97\xe0\x52\x1f\x45\x94\xce\x4f\
\xe0\xd9\xa0\xda\xb1\x1b\x23\xd8\x65\x96\xfd\x84\x94\x1a\x4c\xe6\
\xb8\x60\x2c\x4d\xcd\x56\x18\x2b\x7e\xcf\x8a\xef\x9f\x26\xb2\x27\
\xa3\x64\x03\xdc\x60\xce\x29\xb7\x69\x50\x9c\xd1\x92\x63\xa9\x78\
\xc2\x9a\x0b\x1e\xf1\x0d\xe7\x34\x02\xbc\x19\xaa\x90\x94\x17\x94\
\xb2\xf7\xb1\xd7\x0f\x68\x04\xd9\xf4\xf9\x45\x25\x72\x1a\x19\x0d\
\x6b\x11\x17\x9c\xff\x63\x34\x08\xf5\xcf\x21\x9c\x2a\xf1\xbc\xf5\
\x9e\x3e\x90\x12\x4e\x15\x07\x33\xbf\x3b\x51\x83\xee\x80\xd3\xdf\
\x0a\x93\xa1\xa0\x09\x7a\x03\x59\x88\x12\x31\xc7\x54\xe4\x8c\xb8\
\xc9\x88\x9f\xf0\x1e\x37\x59\x73\xc5\x96\x8a\x73\x2e\x58\x52\x72\
\xc1\x19\xe7\x5c\xb0\xc5\x49\x85\x63\xa5\x83\xb5\x2b\xed\x3b\xc9\
\x44\x7d\x70\xee\x42\xb2\x6f\x28\x04\xe3\x8c\x58\x71\x85\xc2\x30\
\x25\xff\x95\x7a\xbe\x8f\x18\x1e\x19\x8d\x23\x7e\x1f\xcc\xa7\xd1\
\x87\x4e\x9e\x66\x21\x23\xe6\xfb\xcf\x7d\x98\x57\xee\x8f\x44\xd7\
\xa2\xad\xee\x88\x85\x61\xbb\x43\xaa\x3b\x51\x9a\xbc\xc1\x5d\x6e\
\x91\x70\x9b\x29\x1d\x63\x46\x94\x3c\xe6\x09\xf7\x39\xa3\x63\x21\
\x53\x19\xfd\xd5\xf3\x1d\x38\x15\x0a\xbf\x48\xc2\x25\xe2\x8e\xdb\
\xb0\xd9\xa1\x6f\xe1\x68\x32\x0c\xe7\x2c\xb8\x49\x17\x84\xdb\x5e\
\x68\x12\x78\x07\xc6\x8d\xee\x81\xfb\x68\x37\x4f\x7b\xee\xf4\xc0\
\x63\xa8\xa7\x74\x40\xf4\x40\x92\x51\x05\xde\x84\x57\x17\xed\x71\
\xea\x5e\xbb\xba\x11\xec\xeb\x16\xa7\x82\x55\xc4\x94\x02\xd3\x97\
\x2c\xd9\x50\x73\x45\x41\x4b\x02\x68\x09\xd8\x3d\xb5\xd9\x08\xd6\
\x19\x49\xab\xc6\x84\xb8\xe4\x27\xc5\x6b\x01\xf0\x7a\x95\xcb\x2d\
\x85\xcc\x31\xf7\xa3\x57\x2f\x30\xe5\x77\xa8\x24\x67\x42\x9d\x91\
\x0d\x86\x81\x5c\xc0\x13\x76\x9a\x0e\x76\x4f\xa8\xc0\xdf\x61\x4f\
\x46\xcd\x48\xc3\xbc\x56\x9f\x6f\x78\x86\x85\x4f\x81\x0d\x95\x28\
\xa6\x3a\x14\x37\x78\x0f\xc3\x05\x67\x5c\xb1\x09\x13\x7a\xa9\x4c\
\x89\xf8\x81\x83\x29\x73\x4e\xc9\x85\x9a\xd6\x05\xf9\xd7\xbe\xd2\
\xb0\xe2\x33\xee\x70\x0b\x47\xc2\x31\x11\xb1\x68\xa9\xbf\xf4\xb8\
\xe3\xd3\x9f\xda\xf7\xbe\xdc\x30\x0b\x3d\xc8\x2c\xfb\xa3\xd9\x0a\
\x8c\x13\x49\xd3\x68\x28\xed\xe8\x02\x03\xa6\xc7\x3d\xbd\xea\x99\
\x0f\xb3\x8e\x19\x77\x79\x9d\x87\x3c\xe1\x4c\x96\x18\x5a\x52\x69\
\xf8\x65\x18\x8c\x48\x8f\x0f\x05\x7f\x7a\xd1\xdf\xdd\x03\xf1\xe0\
\xaf\x91\x4e\x6c\x7c\xc0\x27\xfd\x1e\xdb\x1d\xa3\xcf\xcc\x27\x26\
\x4c\x59\xed\x9c\xe5\xae\x05\xd8\x0a\x94\xda\x4a\x17\x3d\x91\x9e\
\x05\xb2\x10\x44\x0d\xc6\x67\x9d\xb8\xbe\x4e\x68\x00\x5a\x06\x9d\
\x2a\x3a\x09\x8b\x63\x72\x0c\x57\x42\x15\xf5\x13\x40\x91\x78\x9b\
\x89\x70\x67\xfa\xae\x6c\x27\x86\x70\x83\xa1\xe7\x52\xe4\x60\xda\
\xd0\xb1\xbf\x5e\xef\xe8\x85\x35\x66\x82\x21\x7e\x13\x07\x85\x80\
\xa1\xf4\x81\x3b\x80\xd8\x90\x26\x8c\xa7\x96\x8c\x06\x94\x3e\xf5\
\x94\x3c\x06\x82\x45\x44\xa2\x67\xd4\xc8\x35\xd4\x22\x38\x3e\x63\
\xce\x09\x37\x39\xe5\x26\x13\xc6\xe4\x64\x4c\x98\x33\x11\x9a\x92\
\x09\xc5\x5d\x27\x8f\xa0\x96\x1e\x8c\x15\x10\x27\x91\x93\x68\x18\
\x87\xa7\xee\x5e\x55\x36\xa1\xff\x54\x3d\x50\x12\x1a\x6e\x72\x77\
\xe1\x87\xb9\x90\xdc\x18\x59\x33\xa6\x02\x53\x21\x0a\x43\xd4\xfb\
\x4a\x97\x66\x2f\x18\x6b\xa9\x63\xc0\x31\xe6\x88\x48\x5a\x8e\xc8\
\x38\x63\x24\x30\xbe\xde\xd3\xa2\xe8\xa4\xf4\xaa\x43\x64\xd3\xe4\
\x42\x3c\xdc\xd0\x30\x62\xc4\x0d\xd2\x57\x5b\x2b\x71\xdd\x20\xcb\
\x7e\xa6\xd0\x5f\x92\x9d\xca\x6d\x24\x82\x15\xb1\xec\xe4\x24\xcc\
\x77\x47\x03\x21\x60\xbb\xa7\xfd\xd1\xbf\x0d\x23\xfa\x40\x7e\x39\
\x88\xa6\x63\xc4\x14\x45\x46\x4d\x4d\x23\xac\x59\x27\x88\x85\x6f\
\x08\x74\x81\x9a\xe0\x44\x14\xb8\x0d\x4d\x81\x29\xb9\xa0\x99\x30\
\x61\xce\x4d\x72\xf5\xb4\xb0\xe8\x2b\x18\x62\xe7\x20\xf7\xc7\xdd\
\xbb\x10\x3c\x75\x18\x77\x4b\x24\xde\x74\x03\x01\xb7\x7d\xf5\x11\
\x17\xe6\xc2\x6d\x48\xc8\x9c\x80\x80\x1e\x5f\xc8\x84\x3c\xaa\x44\
\x2f\x24\x16\x77\xba\xbb\x7e\x86\x58\xa2\x9a\x6f\x42\xd7\x41\xea\
\xd3\x60\x18\x73\xc4\x4d\xa6\x1c\x51\x72\xca\x5b\x1c\x7d\x96\x0e\
\x84\x04\xbf\x87\x21\x86\xa5\xba\xda\x83\x60\x5c\x58\xf4\xd1\x1f\
\xd8\x48\xc6\x94\x7b\xc1\x0b\x1f\x24\xd5\x40\x37\x1b\xf9\x8c\x6e\
\x87\xa0\x13\x89\xb4\x93\x47\x13\x46\x74\x6c\x05\x65\xaa\xd1\xb4\
\xc4\x32\x4a\xd5\x85\xc4\xdf\x8a\x80\x4a\x3f\xa2\xdf\xef\x84\xf3\
\x25\xfa\x4d\xde\xe1\x16\x96\x8a\x31\xc7\x2a\x0e\xe9\xf8\x0b\x64\
\x96\xcf\x1a\x6a\xf2\x07\x3a\xfe\x75\xfe\xdb\x22\x80\x74\x51\x20\
\xa8\xea\xb0\x2f\xdc\x05\x1d\x01\x27\x00\xbe\x97\x5f\xcd\x98\x0a\
\x27\xcb\xfb\xfd\x5e\xc3\xde\x85\x69\xf0\xfe\x6a\x39\x99\x1b\x76\
\xa4\x68\xc6\x20\x33\x21\x91\xf0\x40\x7d\xd3\xbf\xa2\x91\x45\xb9\
\x09\x39\x09\x1d\x6b\x5a\xc0\xb2\xc5\x12\x93\x11\x91\xf0\x06\x6f\
\x70\xac\xfc\xf6\x8e\x68\x00\x24\x7e\xcf\x3c\x42\x0d\x26\xb5\xaf\
\xb3\x6c\xbf\x35\xc1\xc8\x3d\x2d\x04\x58\x3b\xa7\xe5\x98\x5b\xbc\
\x87\x96\xbb\xbf\x11\x02\x61\xaf\x5c\xdb\x8a\x30\x67\x22\xfb\xfe\
\x90\x49\x11\x4b\x1e\x26\xcf\x7b\x95\x90\x48\x7e\x8a\xa3\x0e\x72\
\xaf\x7d\x5b\xa0\x0d\x2c\x5d\xdf\x02\x98\x91\xff\x5e\xef\xa9\x52\
\xf1\x43\x38\x4b\x50\x5f\xec\xeb\x56\x5e\x6f\x88\x48\x3a\x57\x5a\
\x66\x6f\x1e\xb2\xe6\x8c\x2b\xf1\xf7\x4a\xf6\x7b\x59\x69\xe4\x1a\
\xe9\x74\xed\x94\xd5\x77\x5d\x57\x2b\x10\x4f\x8a\x93\x8d\x5e\x26\
\x5c\x37\x4b\x5f\x1b\xb7\x52\x67\xb4\x61\xa7\x93\x43\x93\x71\xc4\
\x31\xe3\x8f\x7e\x04\x43\x28\xf4\x3d\xf3\x9c\x45\xa2\x4a\xb8\x2a\
\x48\xdf\x7b\xc6\x5b\xa4\x5c\x61\x18\xf1\x40\x38\x39\x8d\xec\xca\
\xc8\x85\xc1\xd0\xff\xf3\x48\x3c\x8b\x97\x43\xd1\x83\x0e\x18\xd2\
\x18\xb4\x01\x6f\x70\xd2\xf2\x27\xcc\xfb\x76\x01\x97\xea\xe1\xdf\
\x09\x23\xcc\xde\x58\xf4\x0f\x77\x22\x06\x82\xac\xd7\x69\xd2\x0c\
\x83\xa8\x87\xcc\x22\x4e\xb9\x29\x0c\x89\x73\x96\x5c\xb0\xa0\x94\
\xe6\xe1\x2a\xe4\x1c\x29\x63\x32\x8e\xe5\xc0\x37\xb2\xea\xd2\x48\
\xb5\xb9\x73\xc0\xbe\x15\xe0\x67\xfd\x52\xe9\x5e\x79\x3d\x19\x8f\
\x52\x38\xd1\x92\xf0\xad\xc2\x11\xd1\x3f\xb2\xb7\xe7\xe5\x87\x3b\
\x11\x7b\x55\xe7\x75\x31\xa5\x1b\x70\x1e\x32\x01\xe8\x73\xc9\x1e\
\xe7\x2c\x98\xb0\xa4\x11\xcd\x38\x4d\x4b\x21\xa0\xaa\x41\xf1\x84\
\x86\x2d\x05\x55\x90\xd9\xeb\x35\x65\xfc\x36\xcf\x58\x82\x63\xc9\
\x56\x10\x87\x3e\x29\x8b\x06\x5d\x15\x2d\xc5\xf7\x84\x09\xe6\xf3\
\x97\x49\x17\x5f\xea\x97\x1e\x5c\x0d\x77\x4d\xb6\x6e\x07\x93\xa3\
\x51\xd8\xd7\xeb\x4f\x4a\xca\x9c\x98\x93\xa0\x2f\xe8\x04\xaf\x58\
\xb3\xa1\x92\x45\xca\x95\xc0\x70\xbd\x3e\x6d\x8f\x3e\x18\xf9\x0a\
\x23\x73\xe0\x89\xb4\xf4\xac\xd4\x2f\x91\x34\x95\x7b\xc3\x4c\x38\
\x61\x8e\xf9\x7c\xa8\xb8\xfc\x03\x5f\x8d\x67\x7f\x43\x13\x42\xa1\
\x8f\xf3\x3e\x90\x25\x12\x49\x12\x19\x5f\x50\xb2\x6f\xcf\x6b\x90\
\xcc\x89\xb1\x94\xb2\x35\x67\xc5\x13\xce\x59\x4a\x53\xf7\xb1\x30\
\x61\x2a\x69\x0c\xf7\xd4\xa3\x96\x9a\x0c\xcb\x8d\x80\x4f\xf8\x39\
\xaf\x42\xb2\x4d\x88\xc8\x85\x4a\x76\x9d\x3e\xd6\x2b\x19\x62\x5f\
\x6f\x50\x13\xbf\xaf\xbf\x52\xe1\x12\xa8\x00\xc6\xd8\x20\x7c\xb1\
\x13\x66\xb2\x82\x2a\xf5\xbb\x9d\x3a\xd9\x84\xd0\x85\x5c\xb4\xa2\
\x22\x65\xcc\x94\x11\x35\x5b\x40\x73\x2a\xcb\x2f\x4b\x16\x58\x96\
\x9c\xb3\x92\x45\xec\x17\x54\x92\x2b\x28\x26\x68\x96\x1c\x91\xcb\
\x38\x53\xc7\x5a\xf2\x0c\xef\x80\x73\x8e\x38\x51\x6a\xa0\xa5\xf5\
\x3c\x53\xbc\xec\x89\xb8\xa7\x0e\x74\x08\x19\xd0\x8b\x86\x32\xf3\
\xee\x29\x2d\xec\x9e\xd2\x97\x88\xa7\x6f\x43\xc9\xec\xe9\x87\x69\
\xd0\x27\xf2\xe8\x56\x2e\xa3\xd2\x39\x6b\x19\x6c\xac\x64\x80\x1e\
\xe6\xdc\xe4\x26\x73\x26\x41\xb0\xa7\xa3\xc0\x11\x93\x10\x93\x89\
\x84\xcb\xcb\xfc\x7a\xe9\x84\x4a\xb1\x2f\xc1\xb9\x23\x79\x74\x1c\
\x4a\xb9\xee\x84\xbb\x86\x22\x7e\x7d\x6f\xc4\x48\xe4\x6f\x02\xb1\
\x70\x47\x34\xd5\x24\xcc\x85\x99\x63\x85\xc1\x35\x92\xe9\x6e\xdf\
\x80\xbc\xc9\x09\x53\xb2\xf0\xf1\x46\xae\x62\x4c\xc2\x84\x23\x66\
\x5c\xaf\x9b\xf8\x03\x3b\xcb\x43\x45\x73\xb5\x57\x14\xbb\x3d\x49\
\xd7\xfe\x65\x74\x01\xc8\xeb\xcd\xe7\xa4\x17\x6a\x24\x39\x66\x6f\
\xd1\x80\xa5\xa1\xa0\x94\x6d\xf4\x8d\x94\x52\x7e\xbb\x68\x3f\x36\
\x19\x63\x45\xde\xad\x91\xcf\xf0\xdb\x7a\xe6\x4c\x7e\x7f\xbd\xb8\
\xe4\x0f\x74\x35\x74\x20\xef\xf1\x14\xc8\x62\x0f\x8c\x74\x08\xe1\
\xd8\xd0\x1b\x47\x66\x00\xca\x20\x9f\xa4\x82\x08\xdf\x4e\x61\x7d\
\x4d\xcd\x9a\x2b\x2e\xb9\x92\x1d\x7f\x8e\x88\x94\x9c\x99\x9c\x85\
\x2e\x68\x14\xee\x24\x1c\xfd\x10\xf7\x84\xe4\x23\xf5\x52\x97\xe3\
\x25\x37\xc9\xeb\xb0\x16\xd9\xee\xe9\x58\x76\x07\x9b\xb9\xd5\x1e\
\x1f\x13\xc9\x2f\x08\xe2\xbe\x16\x4d\x49\x4d\x23\x0b\x70\x7d\x7f\
\xa2\x15\x2e\x8c\x47\xba\xcf\x28\x44\x66\x61\x43\x8b\x0e\x9c\xaa\
\x8c\x09\x39\x26\x6c\x5d\x42\xa4\x15\x94\x50\x47\xe6\x9c\x32\xdd\
\x23\x2f\xfe\x28\x86\xd0\xbf\xd7\x1f\x22\x44\x0c\x33\x40\xb0\x15\
\x87\xe2\xe0\xd7\xa9\x99\x76\x03\x85\xbb\x3e\x45\x2e\x64\x02\xdc\
\x6b\x0f\xd8\xb0\xdc\x72\x23\x27\xa6\x14\xbe\x5e\x2a\x53\x66\x5e\
\x0c\xcc\x57\x28\x06\x47\xc5\x96\x2d\x85\xd0\x0a\x27\xdc\xe6\x0e\
\x93\x5f\xeb\x17\x48\xab\x5f\xd9\x10\xa0\xd0\x5f\xe8\x0f\x87\x6d\
\x5f\x06\x70\x8d\x0a\xb3\x3d\x6e\x0f\x28\x75\x83\xc9\x52\x17\xe6\
\x7e\xa6\x34\xa2\x49\x50\xe1\x68\x58\xb1\x62\x49\x2d\x0c\xbc\x5a\
\xd2\x6d\x2b\x4a\x32\x31\x23\x19\x75\x8e\x24\xaa\x20\x1f\xf3\xaa\
\x43\x05\x23\xc0\x30\xe6\x98\x39\xe9\xe7\x6a\x0f\x48\x7c\x81\x6b\
\xbf\x7a\x29\x58\xc6\x52\xb1\x72\x57\x2c\x44\xb9\x23\x0a\x44\x9c\
\x7e\x21\x95\x19\x64\x8d\x2a\x38\x57\x7f\xf8\xfb\x55\xb8\x8d\x6c\
\x7d\xee\x64\xf2\xb7\xa4\xa4\x65\xcd\x8a\x73\x16\xa2\x5a\xec\x44\
\xe6\xb9\xef\xc7\xfb\xb9\xb1\xa9\xc8\x05\x26\x61\xbd\x89\x1f\x72\
\x29\x69\x48\x89\x38\xe2\x5f\xf1\x0b\xde\x7d\x7f\x74\xef\x45\xa3\
\xc5\x2b\x46\x0d\x70\x77\x7b\xe7\xd8\x4a\x71\xa5\x06\xad\x20\x78\
\x7a\x99\x04\xa1\x26\x1c\xee\x4f\x71\x54\x72\xae\x22\x69\xff\xb4\
\x34\x8c\xa4\x87\xea\xcb\x29\x42\x87\xd3\xc9\x28\xa6\x9f\x32\xac\
\xe9\x68\xc8\x24\x1d\x2f\x45\xba\x2f\x41\x91\x73\xcc\x8c\xe4\xde\
\xcb\x5d\x8b\x57\x32\xc4\xae\xd6\xac\x65\x3d\x91\x1e\x90\xc9\x7a\
\xfd\x07\x0d\x07\x7b\xba\x86\x86\xf0\x90\x59\x23\xe6\x74\x03\xfc\
\x33\x91\x53\xd6\x0d\xb8\x15\x9d\x2c\x2e\x48\x05\xa1\xb4\xd2\x0f\
\x89\x84\x13\xb3\x11\x36\x44\x42\xe4\x37\x94\xff\x3a\x7e\x85\xf7\
\xf3\xca\x86\x20\x4c\xd8\xc5\xc2\xab\x69\x05\x4c\xd1\xb2\x1f\x67\
\xa8\x83\xbc\xaf\xda\xe0\xeb\xc5\x7e\x59\x54\x2b\x79\x45\x24\x9a\
\xc6\x99\xd0\x8a\x7a\x30\xcf\x67\x13\x0d\x0c\xba\xf3\x0a\x84\x5c\
\x5c\xc9\xd0\x82\x01\x26\xbc\xc6\x2d\xf2\xcf\x79\xe6\xce\x84\x1f\
\xd0\x10\xea\x9e\x0a\x63\x91\x76\x80\x51\xf6\xfb\x90\x5c\x80\xd2\
\xf6\x2b\x54\xb5\x57\x07\x6a\x60\x36\x68\xc9\xb4\x74\xc4\x8c\x85\
\xde\x6a\x64\x67\x43\xbf\x97\x2f\x61\x44\x2b\xe3\x6b\x3b\x31\xd8\
\x2a\x84\xc8\x9e\xf1\x7f\x83\x37\x99\x7f\xa6\x9f\x41\x88\xfd\x41\
\xa3\x06\x83\x89\x70\x5f\x21\xf4\xe9\xb2\x47\x9a\xfb\xb3\x61\x07\
\x7b\x9a\x14\xfb\x8b\xca\x5c\x28\xd9\xd5\x8e\xdb\x49\x47\x1a\xea\
\x52\x9f\x38\xf5\xcc\xcb\x3a\x40\x32\x3d\x89\xb8\x27\x26\x39\xd9\
\xe9\xe7\xab\xdb\x9c\x3b\xdc\x22\xfd\x8d\xbb\x46\xda\xf7\x07\x3f\
\x11\x2a\xd8\x3f\x96\xa1\x14\xff\xc4\xa2\x41\xa6\x61\xd9\x57\x42\
\x1c\x6e\xe3\x73\x83\x31\x07\xcb\x50\xf8\x4b\x09\xf3\xba\x11\xfe\
\x6d\x2c\x86\x68\xc2\xf9\x4a\x43\xfb\xc6\x89\x43\xed\x04\xe2\x51\
\x44\x64\x9c\x48\x22\x45\xd8\x32\xfa\xe3\x5e\x8d\xa0\x62\x6c\xc4\
\x59\x35\x32\x5e\x9c\x0d\xea\x8f\xe1\xda\x4a\xbd\xa7\x8d\xbb\xe3\
\xe0\x45\xec\xd6\xa1\x29\x81\xd9\x6c\x60\x60\xf7\x2a\xc9\xdd\x01\
\x19\xd9\xc9\x89\xb2\x62\x8e\x5a\x96\x1a\xa6\xcc\x99\x91\xa8\x97\
\x6c\xe8\xbe\x6a\xad\xb1\x93\xe8\x8f\x49\xc8\x44\x12\x29\x62\x4a\
\x4e\x21\xf9\x5e\x3f\x3c\xdd\x2b\x9f\xfa\x5e\x44\x24\x29\xd8\xae\
\x8b\xd1\x27\x65\x56\xc0\x7c\x27\xe4\x0f\x47\x25\xd8\x63\xaf\x74\
\xec\xf9\xd4\x1b\x09\xd8\x08\xb8\x57\x07\x66\xb7\x23\xe1\x94\x9f\
\xf2\x9a\x0c\x4d\x1f\x2e\xb0\x50\x3f\x5e\xd4\xe8\x67\x32\x62\xd2\
\x40\x2f\xad\x45\x97\xb8\x17\xed\x4b\x84\xd5\xdf\x4b\xf1\x9a\x70\
\x36\x5c\xd0\xad\x66\xb0\x43\x41\x05\xb2\xea\x90\x4c\x62\x65\xdd\
\x4d\x33\x18\x73\x69\x82\x78\x5b\x3f\xf4\x38\x62\xc2\x9c\xec\x33\
\x33\xd8\xdc\xe3\x7e\xec\xf0\xd9\x17\xd8\x31\x31\x19\x99\x34\xde\
\x4a\x2a\x12\x29\x85\xfd\xbf\xb9\x64\x85\x3b\x3e\x6e\xb4\xb7\x9a\
\x46\x0f\x4c\xd1\x5f\xa9\x36\x0c\xad\x74\xe2\x10\x2b\xa9\x37\x1a\
\x69\x05\xb7\xf2\x7f\x23\x54\xf9\x88\x88\x11\x47\x9c\x70\x4c\xfa\
\x9b\x67\x13\x41\x7e\x50\x43\xb8\x3d\xb4\xb8\x4f\x75\x3b\x4a\x79\
\x79\x6d\xd0\xd4\x37\x18\xe9\x3c\x45\xc1\x11\x12\x98\xb8\xec\x65\
\x99\x0c\x16\x5b\xba\x60\xba\x56\xce\x85\x4f\xbf\xbd\x5a\xaa\x5f\
\xde\x5e\x51\x88\x57\xe9\x48\xd0\xa4\x4c\x38\xe6\x58\xe8\xe6\x1d\
\xea\x15\x62\xc6\x2b\x5e\x8d\x7e\x05\xa5\x97\xfd\x6e\x45\x0e\xd6\
\xc9\xf4\xb6\x93\x37\xee\xe9\x22\x91\xb0\x2a\xa3\xbd\x36\xdd\x4e\
\x65\x12\x38\x58\x80\xb8\xbb\x3a\x48\x17\xa3\xa3\xa1\x60\x45\x25\
\x6c\xfd\x62\xb0\x7f\xc9\x2b\x0b\x4d\x99\x92\xfc\x7e\xc7\xa0\x74\
\x2f\x1d\x37\x5e\xbe\xd6\x08\x75\xa6\x91\xdd\xbb\xde\x3f\xf4\xe6\
\xe9\x04\x52\x6f\x05\x27\xf0\x53\xe6\xbe\xa9\xdb\x53\xce\xa2\xd0\
\xb5\xb4\x92\xa1\xee\x66\x7f\x9c\x9c\xae\xdd\x32\xe4\x35\x50\xb1\
\x65\x2d\x27\xa2\x5f\x26\x60\x24\x6c\x66\xa2\x7f\x13\x7f\xa4\x0e\
\x62\xd4\xdf\x20\xc5\xd6\xc1\x59\x76\x32\x4a\xd0\x0a\x66\x69\x25\
\x9d\xd2\xa1\xfe\xf0\x49\x50\x26\xdb\x12\x9c\xb4\x89\x23\x52\xa9\
\x33\x1a\x59\x73\xde\x27\x69\x8d\x24\xd4\x05\xb0\x61\xcd\x1a\x4d\
\xc9\x5a\x88\x20\x3b\x69\x60\x25\x52\xc4\xa9\x68\x6d\x9a\x01\x26\
\x62\xff\x76\x45\x57\xbf\x33\xcd\x88\x92\x59\x1c\x9a\x3b\x9d\xd4\
\x10\x56\x00\xfc\x3e\xf1\x49\x05\xa1\x56\x02\xed\x2b\x61\x34\x34\
\x72\xe3\x9d\x74\x2a\x6b\xd9\x93\x50\x00\xa5\xb0\xf2\x3d\xac\x57\
\x0b\x60\xe7\x45\x47\x8d\xf8\xa2\xa7\x89\xf2\xee\x60\x01\xef\x0f\
\x60\x88\xa1\x7b\xdc\x29\x9b\xfa\x43\x5c\xa2\x65\x69\xba\x95\xb2\
\xa7\xa6\x46\xcb\x0c\x76\x23\x8a\xb7\x1e\x48\xa9\x65\x54\x35\x15\
\x78\x35\x21\x61\x11\xbe\xd6\xca\x89\x68\xd1\x82\x48\x79\xde\xe4\
\x15\x4b\x6a\x99\xfc\xed\xb3\x58\x9f\x95\xf4\xdb\x04\x73\x51\xcc\
\xcd\xb8\x2d\x43\x94\x4a\x68\xa6\xee\x87\x86\xea\x54\xc0\x79\xf4\
\x60\x25\x25\x03\x8e\xad\x21\x16\xf5\x28\x1d\x6a\x91\x2e\x50\x47\
\x54\x98\xea\xf3\x12\x49\x7d\x83\xb6\x1f\x3b\xb0\x92\x84\x69\x59\
\x6b\x6d\x25\x44\x76\xa2\x6b\xeb\x85\x9e\x0b\x3a\xe9\x86\xd9\x30\
\x2e\xe1\x2f\x52\x27\x2e\x3b\xe6\xfb\xfd\x8a\x9e\xe7\x16\x9f\x3e\
\x21\x84\xbd\x6b\x3d\x01\x3c\x95\x26\x8d\x77\x8d\xad\x88\x26\xfa\
\x99\x2c\x8f\x47\x55\x61\x74\xc0\x61\x44\xbf\xce\x0f\x92\x20\x6a\
\x44\x91\x9c\x26\xa4\xcd\xd7\x70\xc9\x8a\x95\xcc\x73\x55\x58\xaa\
\x81\xe9\xbd\x77\xf0\x17\xc9\xf3\x71\xd3\xff\xf0\xa3\x19\xe2\x90\
\x8c\x2c\x9c\xa8\x4f\x7b\x9c\x40\xef\x7d\x9e\x16\xf9\x95\xbe\x2a\
\x30\x52\x23\x74\x41\x72\x7e\x2b\x25\x92\xe7\x4d\x8c\x38\x0a\xfb\
\xc2\xbd\x21\x12\xa0\xa4\x61\xc5\x15\x2b\x6a\x61\xdd\xd6\x92\x52\
\x39\x69\xfb\x1b\x51\x2b\x8d\xa5\x57\x52\x13\x7b\x1d\x8a\x2f\x7e\
\x34\x43\xec\x17\xdf\x04\xd2\x46\xdf\xb9\xe8\x8f\xb5\x92\xcc\x31\
\x91\x50\xd8\x4f\x73\x69\x31\xc2\x8e\x51\xe9\x27\x34\x3a\xb6\x6c\
\x89\x29\x88\x45\x6a\xd3\x57\x25\x33\x5a\xb6\x2c\xb9\x64\xc1\x46\
\xb6\xce\x56\x92\xa5\xd8\xa0\x33\x61\x84\x4b\x67\xa4\x6d\x68\x84\
\x4d\xa1\xf9\x91\x0d\xc1\x60\x8f\x96\x0e\x3d\x8d\xbe\xba\xec\xd9\
\x29\xb1\xf4\x34\xfb\x1b\x6f\x05\x4a\xab\x83\x06\x80\x96\x36\x7e\
\x2b\xca\xd6\x4a\x9a\xbe\xb1\x78\x0e\xc7\x94\x8a\x25\x4b\x56\x6c\
\xc4\x5d\x36\x61\x72\x18\xd1\x3e\xd2\x03\x95\x81\x26\x94\x76\xd9\
\x8f\x6b\x08\xb7\x47\x1e\xb3\xd4\x1f\x64\x5f\x40\xf7\x49\x23\x47\
\xd4\x0d\x1a\x74\x7e\x9b\xa2\x7e\x4a\x70\xa5\x0e\x07\x1a\x61\xcb\
\x1a\x99\xd6\xea\x58\x8b\x98\x78\x22\x18\x65\x2a\x02\x7b\xb5\xb0\
\x5f\x90\x3c\x03\x21\x94\xf5\x1a\xec\x4a\xce\x99\xef\x8d\xf8\x7a\
\x27\x82\x1f\xd3\x10\x7d\x82\xe2\x6b\xbe\xd5\x1f\xdc\x67\xe6\x37\
\x3d\x10\x12\x53\x85\xd0\xba\x5b\x45\xd9\xc9\x74\xae\x3f\x07\xad\
\x9c\x1f\x23\x88\x55\x5f\xbe\x57\x28\xa9\x1a\x2a\x59\x8e\x57\x0d\
\x56\x0f\xda\xbd\x15\x44\xbb\x2c\xf6\xb0\xb3\xe6\x13\xb0\x5e\x6e\
\xf4\x47\x33\x84\x93\x14\xd8\xa0\xa9\xf9\x8b\xfb\x9a\x2d\xff\xee\
\x93\x63\xa9\xf9\x5a\xd9\xba\xaa\x84\x41\xe7\x24\x7a\x6b\x12\x2a\
\xb6\x28\x51\x83\x4b\x24\x1c\x5a\x3a\xe2\xa0\x20\x13\xcb\x46\x56\
\xbf\x18\xc2\xca\x49\xe8\x85\x76\x86\xab\xb6\x75\xc0\xc3\x34\x3d\
\x4f\xbb\x0f\xd2\x9d\x44\x27\xaf\xaa\xa8\x9f\x8a\x6b\xdf\xcd\xf6\
\x78\xce\x98\x82\x7b\x6a\x99\x6d\xcd\xa5\xfb\x33\x7f\xe2\x5b\x7e\
\x4a\x45\xf5\x89\x0d\x9e\x82\x01\x04\xa7\x43\x36\x69\x44\xf0\xa6\
\x8f\x34\xdd\x80\x07\xd9\x4f\x62\x79\x44\x61\x26\xe2\xbd\x9d\x90\
\xc8\x75\x58\x26\xc1\x80\x4e\x60\x0e\x74\xaf\x76\x25\x7b\x26\x84\
\xb3\x5e\xdc\x75\x58\xb6\xbd\xc2\x89\xd8\x0f\x90\x3b\xce\x91\xc3\
\xb2\x70\x0b\x2e\xf8\x67\xfe\x09\xc5\xbb\xfc\x9c\xe3\x01\xbc\x46\
\xc8\xe4\x7a\xb3\xf4\xb1\x21\x11\xd8\x2d\x92\x1a\x40\xc9\x8a\xed\
\xed\x20\xb8\x36\x02\xb3\x19\x72\xb9\x2e\x5d\xd8\xe5\xea\x06\x68\
\xa6\x19\x0c\x1e\xa9\xbd\xde\x3b\x42\x27\x88\x48\x19\xc9\xc6\xa8\
\xe1\x93\xfe\x1e\x99\xe5\x2e\x3f\xef\x28\x3e\xa8\xfe\x70\xc9\x1f\
\xf9\x92\x33\xfe\xcc\x25\xbf\xe0\x7f\xe5\x5d\x52\xb6\x94\x32\x82\
\x14\x0d\x58\x6c\x04\x69\x1c\x5f\x1a\xa7\x02\xca\x27\xc2\xa0\xf4\
\xdc\x88\x9d\x80\x8f\x4f\xd5\x1f\x61\x84\x79\x5d\xcb\xa2\x80\xbe\
\xfe\x70\x62\xd8\xe1\xe2\xc4\x43\x05\x2c\x47\x25\x9a\x65\x89\x08\
\xba\xb8\x01\x14\x7c\x1d\xf3\xef\xa5\xa8\x43\x16\x47\xc5\xa5\x7b\
\xc8\x39\x5f\xf2\x57\xbe\xe6\x9c\x98\x9f\xf3\xbf\xf1\x26\x0d\x2d\
\x0b\x59\xe6\xa0\x83\x7e\x84\x0a\xab\xb2\x75\xc8\x34\x7a\x8a\x7a\
\x22\x99\x42\x44\xc4\x56\x8a\xea\x2a\xc8\xff\xf5\x88\xb4\xa7\x79\
\x58\xe9\x7c\x31\x70\xc1\xc3\x85\x73\x7a\xef\x74\x20\x9a\xa7\x8a\
\x8a\x52\xe8\xa6\xfa\x80\xc0\xf2\xca\x27\x42\x63\xd9\x7e\xb0\xfe\
\xc3\x05\xdf\xf0\x15\x0f\xb8\xa4\xe2\x92\x4b\x7e\xc2\xff\xc9\xbf\
\x25\x66\x03\x32\x97\xa3\x65\x34\x69\x08\xe9\x32\xe8\x7d\xba\xc0\
\x91\x31\x52\x76\xc5\x14\xc2\x89\x69\xc2\x10\x5a\x3c\x10\x57\xf1\
\x9a\xa8\x2a\x4c\x10\xab\xc1\xe2\x6b\xb5\x17\x3a\x77\xff\xa4\xd2\
\xf4\x6b\xf6\xb6\x52\x3f\x8b\x29\xfe\x12\x86\xa8\x59\xb8\x6f\xf8\
\x0b\x8f\x38\xe3\x9c\x0d\x35\x4f\xd8\xf0\x26\xff\x3b\x3f\x93\x8d\
\xcb\x88\xcc\x8a\xf7\xd6\xf1\xc1\x8f\x8c\xf7\xdc\xd9\xee\x29\x6a\
\xf9\x68\x47\x41\x2a\x93\xfa\x8d\x34\xfd\xeb\x80\x5e\xec\xc8\x24\
\xf6\x29\xb2\xda\xee\x3c\x0c\x77\x91\x97\x72\x36\x5b\x16\x3c\xa4\
\x70\x37\xdf\x4f\xee\xc5\x83\xc7\xfa\xca\x08\xd5\x63\xf7\x0d\xff\
\xcc\x1f\x45\x16\x53\x73\xc1\x97\x8c\xf8\x3f\xf8\xbf\x48\xb8\x12\
\x3c\xb0\x92\x2f\x31\xd2\x89\x3a\xfc\x56\x36\x60\x01\x3b\x49\x0c\
\x2b\xd2\x1b\x37\xa9\x99\x30\x66\xcc\x39\x89\x2c\x1c\xd1\xa2\x38\
\xd5\x84\xb0\x69\xaf\xd9\xd8\xd6\xb7\x15\xf7\x4f\xc4\x56\x46\xe7\
\xd6\xdc\xe3\x02\xf8\x8f\x5f\x4d\x98\xaa\x68\xaf\x0e\x7a\x09\x43\
\xf8\x36\x6b\x73\xd7\x7e\xfc\xff\xf0\x47\xfe\x2c\xd4\xe0\x46\xda\
\xef\x6f\xf3\xaf\x70\xac\x83\xb0\xa7\xef\x53\x44\x68\x6a\xb1\xe1\
\x6e\x3f\x67\x3b\x98\xc1\xe8\x84\x89\xb9\x3b\x11\x09\x15\x23\x66\
\x1c\xb3\xe4\x88\x05\x5b\x1e\xb3\x64\x1b\xe4\xc5\xd5\xde\x4e\x7a\
\xb5\xa7\x4d\xa0\x44\x21\x64\x5f\x46\x32\xa6\xa6\x60\x4e\xc6\x82\
\xff\xca\x37\xac\xf9\x37\xfc\x9d\xbb\xa1\x7a\xe5\x55\x2d\x2a\xb9\
\x2a\xd0\xd2\x77\x5b\x67\xaf\xc9\x23\x2a\x56\xce\x6b\xdd\x3f\x66\
\x41\x49\x1b\x10\x24\x98\x73\x2a\xea\x3e\x26\x88\xa3\x20\xd9\xc2\
\x7e\x57\x0b\x49\x88\x6d\x20\x07\x0c\x57\xd2\xfa\x29\x8d\x2c\x34\
\xf0\xfc\xc0\x5a\xce\x82\x95\x88\x67\x94\xc1\x59\xda\xf0\x3d\x76\
\x6d\xc2\x61\x66\xa9\x42\xae\x59\x0b\xeb\xaa\xa5\xe1\x82\xc7\xfc\
\xdf\x9c\x63\x79\xdf\xcd\x19\xa9\xdd\xf0\x33\xd2\x5e\xec\x88\x04\
\x40\xba\xbe\x17\xf6\x3f\x00\xe1\x00\x14\x01\xde\x26\x16\xb2\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x08\
\x0b\x77\x5a\x87\
\x00\x68\
\x00\x65\x00\x61\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 64.167861
| 96
| 0.727222
|
96a3d8c91604a0fdf3dc0c2713d890a04d3706ed
| 4,011
|
py
|
Python
|
TinyDCU_Net_16-master/main.py
|
scofir/AEC-Challenge
|
d0df780f428dd73dc40ef9cd917c089d2f1894fa
|
[
"MIT"
] | 1
|
2021-08-03T03:57:31.000Z
|
2021-08-03T03:57:31.000Z
|
TinyDCU_Net_16-master/main.py
|
scofir/AEC-Challenge
|
d0df780f428dd73dc40ef9cd917c089d2f1894fa
|
[
"MIT"
] | null | null | null |
TinyDCU_Net_16-master/main.py
|
scofir/AEC-Challenge
|
d0df780f428dd73dc40ef9cd917c089d2f1894fa
|
[
"MIT"
] | null | null | null |
import argparse
import os
from solver import Solver
from speech_data import get_reader
from torch.backends import cudnn
import random
import sys
import time
def main(config):
cudnn.benchmark = False
if config.model_type not in ['TinyDCU_Net_16']:
print('ERROR!! model_type should be selected in:')
print('TinyDCU_Net_16')
print('Your input for model_type was %s' % config.model_type)
return
# Create directories if not exist
if not os.path.exists(config.model_path):
os.makedirs(config.model_path)
if not os.path.exists(config.result_path):
os.makedirs(config.result_path)
config.result_path = os.path.join(config.result_path, config.model_type)
if not os.path.exists(config.result_path):
os.makedirs(config.result_path)
if not os.path.exists(config.logfile_path):
os.makedirs(config.logfile_path)
config.logfile_path = os.path.join(config.logfile_path, config.model_type + '.log')
lr = random.random() * 0.0005 + 0.0000005
epoch = 200
decay_ratio = random.random() * 0.8
decay_epoch = int(epoch * decay_ratio)
config.num_epochs = epoch
config.lr = lr
config.num_epochs_decay = decay_epoch
# print(config)
train_reader = get_reader(config, 'train', './list/clean_train.lst', './list/noise_train.lst')
valid_reader = get_reader(config, 'valid', './list/clean_valid.lst', './list/noise_valid.lst')
test_reader = get_reader(config, 'test', './list/clean_test.lst', './list/noise_test.lst')
solver = Solver(config, train_reader, valid_reader, test_reader)
# Train and sample the images
# solver.print_network(solver.unet, config.model_type)
if config.mode == 'train':
solver.train()
elif config.mode == 'test':
solver.test()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data parameters
parser.add_argument('--sample_rate', type=int, default=16000)
parser.add_argument('--frame_size', type=int, default=320)
parser.add_argument('--frame_shift', type=int, default=160)
parser.add_argument('--sent_width', type=int, default=512)
parser.add_argument('--sent_height', type=int, default=161)
parser.add_argument('--min_queue_size', type=int, default=64)
parser.add_argument('--chunk_length', type=int, default=4 * 16000)
# model hyper-parameters
parser.add_argument('--image_size', type=int, default=224)
parser.add_argument('--t', type=int, default=1,
help='t for Recurrent step of R2U_Net or R2AttU_Net and Iteration step of DARCCN')
# training hyper-parameters
parser.add_argument('--img_ch', type=int, default=2)
parser.add_argument('--output_ch', type=int, default=2)
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--num_epochs_decay', type=int, default=70)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--num_workers', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.0002)
parser.add_argument('--beta1', type=float, default=0.5) # momentum1 in Adam
parser.add_argument('--beta2', type=float, default=0.999) # momentum2 in Adam
parser.add_argument('--half_lr', type=int, default=1,
help='Whether to decay learning rate to half scale')
parser.add_argument('--log_step', type=int, default=2)
parser.add_argument('--val_step', type=int, default=2)
# misc
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--model_type', type=str, default='TinyDCU_Net_16',
help='TinyDCU_Net_16')
parser.add_argument('--model_path', type=str, default='./models')
parser.add_argument('--logfile_path', type=str, default='./logs')
parser.add_argument('--result_path', type=str, default='./result')
parser.add_argument('--cuda_idx', type=int, default=1)
config = parser.parse_args()
main(config)
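# --- illustrative invocation (added sketch, not part of the original file) ---
# Flag names and defaults come from parse_args() above; the .lst paths are the
# ones hard-coded in main(). The shell environment itself is an assumption.
#
#   python main.py --mode train --model_type TinyDCU_Net_16 --batch_size 4
#   python main.py --mode test  --model_type TinyDCU_Net_16
#
# Training expects ./list/clean_train.lst and ./list/noise_train.lst (plus the
# matching valid/test lists) to exist, and creates ./models,
# ./result/TinyDCU_Net_16 and a log at ./logs/TinyDCU_Net_16.log.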
| 40.11
| 107
| 0.685615
|
d2fb57ff57b352da3909cb731feb66dd8723fe4c
| 462
|
py
|
Python
|
code/longest_substring_without_repeating_characters.py
|
shenhuaze/leetcode-python
|
b81bdb27d0f9da5620e83e2476c9ef585f4a0001
|
[
"MIT"
] | 1
|
2019-06-17T04:37:39.000Z
|
2019-06-17T04:37:39.000Z
|
code/longest_substring_without_repeating_characters.py
|
shenhuaze/leetcode-python
|
b81bdb27d0f9da5620e83e2476c9ef585f4a0001
|
[
"MIT"
] | null | null | null |
code/longest_substring_without_repeating_characters.py
|
shenhuaze/leetcode-python
|
b81bdb27d0f9da5620e83e2476c9ef585f4a0001
|
[
"MIT"
] | null | null | null |
def length_of_longest_substring(s):
char_positions = {}
left = -1
max_len = 0
for i in range(len(s)):
ch = s[i]
if ch in char_positions and char_positions[ch] > left:
left = char_positions[ch]
char_positions[ch] = i
max_len = max(max_len, i - left)
return max_len
if __name__ == "__main__":
# s_ = "abcabcbb"
s_ = "aab"
max_len_ = length_of_longest_substring(s_)
print(max_len_)
| 23.1
| 62
| 0.595238
|
650038a5f6d4ec0d0923e33ea109e34ba8d9c209
| 17,003
|
py
|
Python
|
bin/copyparty-fuseb.py
|
Daedren/copyparty
|
b9cf8f3973cc4be1df4cd50ad3b2a6ef38a99eb5
|
[
"MIT"
] | 47
|
2020-06-24T19:00:40.000Z
|
2022-03-23T03:57:40.000Z
|
bin/copyparty-fuseb.py
|
Daedren/copyparty
|
b9cf8f3973cc4be1df4cd50ad3b2a6ef38a99eb5
|
[
"MIT"
] | 7
|
2021-02-21T03:11:28.000Z
|
2021-12-03T01:09:56.000Z
|
bin/copyparty-fuseb.py
|
Daedren/copyparty
|
b9cf8f3973cc4be1df4cd50ad3b2a6ef38a99eb5
|
[
"MIT"
] | 5
|
2021-06-30T17:42:40.000Z
|
2022-01-24T04:59:07.000Z
|
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals
"""copyparty-fuseb: remote copyparty as a local filesystem"""
__author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2020
__license__ = "MIT"
__url__ = "https://github.com/9001/copyparty/"
import re
import os
import sys
import time
import stat
import errno
import struct
import threading
import http.client # py2: httplib
import urllib.parse
from datetime import datetime
from urllib.parse import quote_from_bytes as quote
try:
import fuse
from fuse import Fuse
fuse.fuse_python_api = (0, 2)
if not hasattr(fuse, "__version__"):
raise Exception("your fuse-python is way old")
except:
print(
"\n could not import fuse; these may help:\n python3 -m pip install --user fuse-python\n apt install libfuse\n modprobe fuse\n"
)
raise
"""
mount a copyparty server (local or remote) as a filesystem
usage:
python ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,url=http://192.168.1.69:3923 /mnt/nas
dependencies:
sudo apk add fuse-dev python3-dev
python3 -m pip install --user fuse-python
fork of copyparty-fuse.py based on fuse-python which
appears to be more compliant than fusepy? since this works with samba
(probably just my garbage code tbh)
"""
def threadless_log(msg):
print(msg + "\n", end="")
def boring_log(msg):
msg = "\033[36m{:012x}\033[0m {}\n".format(threading.current_thread().ident, msg)
print(msg[4:], end="")
def rice_tid():
tid = threading.current_thread().ident
c = struct.unpack(b"B" * 5, struct.pack(b">Q", tid)[-5:])
return "".join("\033[1;37;48;5;{}m{:02x}".format(x, x) for x in c) + "\033[0m"
def fancy_log(msg):
print("{} {}\n".format(rice_tid(), msg), end="")
def null_log(msg):
pass
info = fancy_log
log = fancy_log
dbg = fancy_log
log = null_log
dbg = null_log
def get_tid():
return threading.current_thread().ident
def html_dec(txt):
return (
txt.replace("<", "<")
.replace(">", ">")
.replace(""", '"')
.replace("&", "&")
)
class CacheNode(object):
def __init__(self, tag, data):
self.tag = tag
self.data = data
self.ts = time.time()
class Stat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 1
self.st_uid = 1000
self.st_gid = 1000
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class Gateway(object):
def __init__(self, base_url):
self.base_url = base_url
ui = urllib.parse.urlparse(base_url)
self.web_root = ui.path.strip("/")
try:
self.web_host, self.web_port = ui.netloc.split(":")
self.web_port = int(self.web_port)
except:
self.web_host = ui.netloc
if ui.scheme == "http":
self.web_port = 80
elif ui.scheme == "https":
raise Exception("todo")
else:
raise Exception("bad url?")
self.conns = {}
def quotep(self, path):
# TODO: mojibake support
path = path.encode("utf-8", "ignore")
return quote(path, safe="/")
def getconn(self, tid=None):
tid = tid or get_tid()
try:
return self.conns[tid]
except:
info("new conn [{}] [{}]".format(self.web_host, self.web_port))
conn = http.client.HTTPConnection(self.web_host, self.web_port, timeout=260)
self.conns[tid] = conn
return conn
def closeconn(self, tid=None):
tid = tid or get_tid()
try:
self.conns[tid].close()
del self.conns[tid]
except:
pass
def sendreq(self, *args, **kwargs):
tid = get_tid()
try:
c = self.getconn(tid)
c.request(*list(args), **kwargs)
return c.getresponse()
except:
self.closeconn(tid)
c = self.getconn(tid)
c.request(*list(args), **kwargs)
return c.getresponse()
def listdir(self, path):
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?dots"
r = self.sendreq("GET", web_path)
if r.status != 200:
self.closeconn()
raise Exception(
"http error {} reading dir {} in {}".format(
r.status, web_path, rice_tid()
)
)
return self.parse_html(r)
def download_file_range(self, path, ofs1, ofs2):
web_path = self.quotep("/" + "/".join([self.web_root, path])) + "?raw"
hdr_range = "bytes={}-{}".format(ofs1, ofs2 - 1)
log("downloading {}".format(hdr_range))
r = self.sendreq("GET", web_path, headers={"Range": hdr_range})
if r.status != http.client.PARTIAL_CONTENT:
self.closeconn()
raise Exception(
"http error {} reading file {} range {} in {}".format(
r.status, web_path, hdr_range, rice_tid()
)
)
return r.read()
def parse_html(self, datasrc):
ret = []
remainder = b""
ptn = re.compile(
r"^<tr><td>(-|DIR)</td><td><a [^>]+>([^<]+)</a></td><td>([^<]+)</td><td>([^<]+)</td></tr>$"
)
while True:
buf = remainder + datasrc.read(4096)
# print('[{}]'.format(buf.decode('utf-8')))
if not buf:
break
remainder = b""
endpos = buf.rfind(b"\n")
if endpos >= 0:
remainder = buf[endpos + 1 :]
buf = buf[:endpos]
lines = buf.decode("utf-8").split("\n")
for line in lines:
m = ptn.match(line)
if not m:
# print(line)
continue
ftype, fname, fsize, fdate = m.groups()
fname = html_dec(fname)
ts = datetime.strptime(fdate, "%Y-%m-%d %H:%M:%S").timestamp()
sz = int(fsize)
if ftype == "-":
ret.append([fname, self.stat_file(ts, sz), 0])
else:
ret.append([fname, self.stat_dir(ts, sz), 0])
return ret
def stat_dir(self, ts, sz=4096):
ret = Stat()
ret.st_mode = stat.S_IFDIR | 0o555
ret.st_nlink = 2
ret.st_size = sz
ret.st_atime = ts
ret.st_mtime = ts
ret.st_ctime = ts
return ret
def stat_file(self, ts, sz):
ret = Stat()
ret.st_mode = stat.S_IFREG | 0o444
ret.st_size = sz
ret.st_atime = ts
ret.st_mtime = ts
ret.st_ctime = ts
return ret
class CPPF(Fuse):
def __init__(self, *args, **kwargs):
Fuse.__init__(self, *args, **kwargs)
self.url = None
self.dircache = []
self.dircache_mtx = threading.Lock()
self.filecache = []
self.filecache_mtx = threading.Lock()
def init2(self):
# TODO figure out how python-fuse wanted this to go
self.gw = Gateway(self.url) # .decode('utf-8'))
info("up")
def clean_dircache(self):
"""not threadsafe"""
now = time.time()
cutoff = 0
for cn in self.dircache:
if now - cn.ts > 1:
cutoff += 1
else:
break
if cutoff > 0:
self.dircache = self.dircache[cutoff:]
def get_cached_dir(self, dirpath):
# with self.dircache_mtx:
if True:
self.clean_dircache()
for cn in self.dircache:
if cn.tag == dirpath:
return cn
return None
"""
,-------------------------------, g1>=c1, g2<=c2
|cache1 cache2| buf[g1-c1:(g1-c1)+(g2-g1)]
`-------------------------------'
,---------------,
|get1 get2|
`---------------'
__________________________________________________________________________
,-------------------------------, g2<=c2, (g2>=c1)
|cache1 cache2| cdr=buf[:g2-c1]
`-------------------------------' dl car; g1-512K:c1
,---------------,
|get1 get2|
`---------------'
__________________________________________________________________________
,-------------------------------, g1>=c1, (g1<=c2)
|cache1 cache2| car=buf[c2-g1:]
`-------------------------------' dl cdr; c2:c2+1M
,---------------,
|get1 get2|
`---------------'
"""
def get_cached_file(self, path, get1, get2, file_sz):
car = None
cdr = None
ncn = -1
# with self.filecache_mtx:
if True:
dbg("cache request from {} to {}, size {}".format(get1, get2, file_sz))
for cn in self.filecache:
ncn += 1
cache_path, cache1 = cn.tag
if cache_path != path:
continue
cache2 = cache1 + len(cn.data)
if get2 <= cache1 or get1 >= cache2:
continue
if get1 >= cache1 and get2 <= cache2:
# keep cache entry alive by moving it to the end
self.filecache = (
self.filecache[:ncn] + self.filecache[ncn + 1 :] + [cn]
)
buf_ofs = get1 - cache1
buf_end = buf_ofs + (get2 - get1)
dbg(
"found all ({}, {} to {}, len {}) [{}:{}] = {}".format(
ncn,
cache1,
cache2,
len(cn.data),
buf_ofs,
buf_end,
buf_end - buf_ofs,
)
)
return cn.data[buf_ofs:buf_end]
if get2 < cache2:
x = cn.data[: get2 - cache1]
if not cdr or len(cdr) < len(x):
dbg(
"found car ({}, {} to {}, len {}) [:{}-{}] = [:{}] = {}".format(
ncn,
cache1,
cache2,
len(cn.data),
get2,
cache1,
get2 - cache1,
len(x),
)
)
cdr = x
continue
if get1 > cache1:
x = cn.data[-(cache2 - get1) :]
if not car or len(car) < len(x):
dbg(
"found cdr ({}, {} to {}, len {}) [-({}-{}):] = [-{}:] = {}".format(
ncn,
cache1,
cache2,
len(cn.data),
cache2,
get1,
cache2 - get1,
len(x),
)
)
car = x
continue
raise Exception("what")
if car and cdr:
dbg("<cache> have both")
ret = car + cdr
if len(ret) == get2 - get1:
return ret
raise Exception("{} + {} != {} - {}".format(len(car), len(cdr), get2, get1))
elif cdr:
h_end = get1 + (get2 - get1) - len(cdr)
h_ofs = h_end - 512 * 1024
if h_ofs < 0:
h_ofs = 0
buf_ofs = (get2 - get1) - len(cdr)
dbg(
"<cache> cdr {}, car {}-{}={} [-{}:]".format(
len(cdr), h_ofs, h_end, h_end - h_ofs, buf_ofs
)
)
buf = self.gw.download_file_range(path, h_ofs, h_end)
ret = buf[-buf_ofs:] + cdr
elif car:
h_ofs = get1 + len(car)
h_end = h_ofs + 1024 * 1024
if h_end > file_sz:
h_end = file_sz
buf_ofs = (get2 - get1) - len(car)
dbg(
"<cache> car {}, cdr {}-{}={} [:{}]".format(
len(car), h_ofs, h_end, h_end - h_ofs, buf_ofs
)
)
buf = self.gw.download_file_range(path, h_ofs, h_end)
ret = car + buf[:buf_ofs]
else:
h_ofs = get1 - 256 * 1024
h_end = get2 + 1024 * 1024
if h_ofs < 0:
h_ofs = 0
if h_end > file_sz:
h_end = file_sz
buf_ofs = get1 - h_ofs
buf_end = buf_ofs + get2 - get1
dbg(
"<cache> {}-{}={} [{}:{}]".format(
h_ofs, h_end, h_end - h_ofs, buf_ofs, buf_end
)
)
buf = self.gw.download_file_range(path, h_ofs, h_end)
ret = buf[buf_ofs:buf_end]
cn = CacheNode([path, h_ofs], buf)
# with self.filecache_mtx:
if True:
if len(self.filecache) > 6:
self.filecache = self.filecache[1:] + [cn]
else:
self.filecache.append(cn)
return ret
def _readdir(self, path):
path = path.strip("/")
log("readdir {}".format(path))
ret = self.gw.listdir(path)
# with self.dircache_mtx:
if True:
cn = CacheNode(path, ret)
self.dircache.append(cn)
self.clean_dircache()
return ret
def readdir(self, path, offset):
for e in self._readdir(path)[offset:]:
# log("yield [{}]".format(e[0]))
yield fuse.Direntry(e[0])
def open(self, path, flags):
if (flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)) != os.O_RDONLY:
return -errno.EACCES
st = self.getattr(path)
try:
if st.st_nlink > 0:
return st
except:
return st # -int(os.errcode)
def read(self, path, length, offset, fh=None, *args):
if args:
log("unexpected args [" + "] [".join(repr(x) for x in args) + "]")
raise Exception()
path = path.strip("/")
ofs2 = offset + length
log("read {} @ {} len {} end {}".format(path, offset, length, ofs2))
st = self.getattr(path)
try:
file_sz = st.st_size
except:
return st # -int(os.errcode)
if ofs2 > file_sz:
ofs2 = file_sz
log("truncate to len {} end {}".format(ofs2 - offset, ofs2))
if file_sz == 0 or offset >= ofs2:
return b""
# toggle cache here i suppose
# return self.get_cached_file(path, offset, ofs2, file_sz)
return self.gw.download_file_range(path, offset, ofs2)
def getattr(self, path):
log("getattr [{}]".format(path))
path = path.strip("/")
try:
dirpath, fname = path.rsplit("/", 1)
except:
dirpath = ""
fname = path
if not path:
ret = self.gw.stat_dir(time.time())
dbg("=root")
return ret
cn = self.get_cached_dir(dirpath)
if cn:
log("cache ok")
dents = cn.data
else:
log("cache miss")
dents = self._readdir(dirpath)
for cache_name, cache_stat, _ in dents:
if cache_name == fname:
dbg("=file")
return cache_stat
log("=404")
return -errno.ENOENT
def main():
time.strptime("19970815", "%Y%m%d") # python#7980
server = CPPF()
server.parser.add_option(mountopt="url", metavar="BASE_URL", default=None)
server.parse(values=server, errex=1)
if not server.url or not str(server.url).startswith("http"):
print("\nerror:")
print(" need argument: -o url=<...>")
print(" need argument: mount-path")
print("example:")
print(
" ./copyparty-fuseb.py -f -o allow_other,auto_unmount,nonempty,url=http://192.168.1.69:3923 /mnt/nas"
)
sys.exit(1)
server.init2()
threading.Thread(target=server.main, daemon=True).start()
while True:
time.sleep(9001)
if __name__ == "__main__":
main()
| 28.67285
| 145
| 0.454449
|
3fb5a40fc4f51b0d666000bbb290ecc8b3fafebc
| 5,507
|
py
|
Python
|
resqpy/organize/rock_fluid_unit_feature.py
|
poc11/resqpy
|
5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2
|
[
"MIT"
] | null | null | null |
resqpy/organize/rock_fluid_unit_feature.py
|
poc11/resqpy
|
5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2
|
[
"MIT"
] | null | null | null |
resqpy/organize/rock_fluid_unit_feature.py
|
poc11/resqpy
|
5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2
|
[
"MIT"
] | null | null | null |
"""Class for RESQML Rock Fluid Unit Feature organizational objects."""
from ._utils import (equivalent_extra_metadata, alias_for_attribute, extract_has_occurred_during,
equivalent_chrono_pairs, create_xml_has_occurred_during)
import resqpy.olio.uuid as bu
import resqpy.olio.xml_et as rqet
from resqpy.olio.base import BaseResqpy
from resqpy.olio.xml_namespaces import curly_namespace as ns
from .boundary_feature import BoundaryFeature
class RockFluidUnitFeature(BaseResqpy):
"""Class for RESQML Rock Fluid Unit Feature organizational objects."""
resqml_type = "RockFluidUnitFeature"
feature_name = alias_for_attribute("title")
valid_phases = ('aquifer', 'gas cap', 'oil column', 'seal')
def __init__(self,
parent_model,
uuid = None,
phase = None,
feature_name = None,
top_boundary_feature = None,
base_boundary_feature = None,
extra_metadata = None):
"""Initialises a rock fluid unit feature organisational object."""
self.phase = phase
self.top_boundary_feature = top_boundary_feature
self.base_boundary_feature = base_boundary_feature
super().__init__(model = parent_model, uuid = uuid, title = feature_name, extra_metadata = extra_metadata)
def is_equivalent(self, other, check_extra_metadata = True):
"""Returns True if this feature is essentially the same as the other; otherwise False."""
if other is None or not isinstance(other, RockFluidUnitFeature):
return False
if self is other or bu.matching_uuids(self.uuid, other.uuid):
return True
if self.feature_name != other.feature_name or self.phase != other.phase:
return False
if self.top_boundary_feature is not None:
if not self.top_boundary_feature.is_equivalent(other.top_boundary_feature):
return False
elif other.top_boundary_feature is not None:
return False
if self.base_boundary_feature is not None:
if not self.base_boundary_feature.is_equivalent(other.base_boundary_feature):
return False
elif other.base_boundary_feature is not None:
return False
if check_extra_metadata and not equivalent_extra_metadata(self, other):
return False
return True
def _load_from_xml(self):
self.phase = rqet.find_tag_text(self.root, 'Phase')
feature_ref_node = rqet.find_tag(self.root, 'FluidBoundaryTop')
assert feature_ref_node is not None
feature_root = self.model.referenced_node(feature_ref_node)
feature_uuid = rqet.uuid_for_part_root(feature_root)
assert feature_uuid is not None, 'rock fluid top boundary feature missing from model'
self.top_boundary_feature = BoundaryFeature(self.model, uuid = feature_uuid)
feature_ref_node = rqet.find_tag(self.root, 'FluidBoundaryBottom')
assert feature_ref_node is not None
feature_root = self.model.referenced_node(feature_ref_node)
feature_uuid = rqet.uuid_for_part_root(feature_root)
assert feature_uuid is not None, 'rock fluid bottom boundary feature missing from model'
self.base_boundary_feature = BoundaryFeature(self.model, uuid = feature_uuid)
def create_xml(self, add_as_part = True, add_relationships = True, originator = None, reuse = True):
"""Creates a rock fluid unit feature organisational xml node from this rock fluid unit feature object."""
assert self.feature_name and self.phase and self.top_boundary_feature and self.base_boundary_feature
if self.phase not in self.valid_phases:
raise ValueError(f"Phase '{self.phase}' not recognized")
if reuse and self.try_reuse():
return self.root # check for reusable (equivalent) object
# create node with citation block
rfuf = super().create_xml(add_as_part = False, originator = originator)
phase_node = rqet.SubElement(rfuf, ns['resqml2'] + 'Phase')
phase_node.set(ns['xsi'] + 'type', ns['resqml2'] + 'Phase')
phase_node.text = self.phase
top_boundary_root = self.top_boundary_feature.root
assert top_boundary_root is not None
self.model.create_ref_node('FluidBoundaryTop',
self.model.title_for_root(top_boundary_root),
top_boundary_root.attrib['uuid'],
content_type = 'obj_BoundaryFeature',
root = rfuf)
base_boundary_root = self.base_boundary_feature.root
assert base_boundary_root is not None
self.model.create_ref_node('FluidBoundaryBottom',
self.model.title_for_root(base_boundary_root),
base_boundary_root.attrib['uuid'],
content_type = 'obj_BoundaryFeature',
root = rfuf)
if add_as_part:
self.model.add_part('obj_RockFluidUnitFeature', self.uuid, rfuf)
if add_relationships:
self.model.create_reciprocal_relationship(rfuf, 'destinationObject', top_boundary_root, 'sourceObject')
self.model.create_reciprocal_relationship(rfuf, 'destinationObject', base_boundary_root, 'sourceObject')
return rfuf
| 47.474138
| 120
| 0.663338
|
265af4950a1371cc9b5211e2aabd9a40819e92aa
| 3,158
|
py
|
Python
|
vis/variation.py
|
uhrwecker/GRDonuts
|
3087aeb5c169251bdb711b425dcc3040ff962da7
|
[
"MIT"
] | null | null | null |
vis/variation.py
|
uhrwecker/GRDonuts
|
3087aeb5c169251bdb711b425dcc3040ff962da7
|
[
"MIT"
] | 25
|
2020-03-26T11:16:58.000Z
|
2020-09-10T18:31:52.000Z
|
vis/variation.py
|
uhrwecker/GRDonuts
|
3087aeb5c169251bdb711b425dcc3040ff962da7
|
[
"MIT"
] | null | null | null |
from vis.simple import Plotter
import matplotlib.pyplot as pl
import numpy as np
class OneParamVarPlotter(Plotter):
def __init__(self, figsize=(10, 10), save=None):
super().__init__(figsize=figsize, save=save)
def plot(self, potential, name, rang, num=1000,
verbose=False):
setattr(potential, 'verbose', verbose)
print('Start calculating closed surfaces. \n This may take a while ...')
y_val = potential.one_parameter_variation_stability_test(name, rang,
num=num)
print('Plotting ...')
ax = pl.gca()
ax.plot(np.linspace(rang[0], rang[1], num=len(y_val)), y_val,
label='closure area (roche limit) for theta=pi/2')
ax.set_ylim(0, 1.05)
ax.set_xlim(rang[0], rang[1])
ax.set_ylabel('closure rating')
ax.set_xlabel(name)
pl.legend()
if self.save:
pl.savefig(self.save)
else:
pl.show()
class TwoParamVarPlotter(Plotter):
def __init__(self, figsize=(10, 10), save=None):
super().__init__(figsize=figsize, save=save)
def plot(self, potential, name1, rang1, name2,
rang2, num1=100, num2=100, verbose=False):
self.n1 = name1
self.n2 = name2
self.num1 = num1
self.r1 = rang1
self.r2 = rang2
self.num2 = num2
def format_func_x(value, tick_number):
tick = self.r2[0] + (self.r2[1]-self.r2[0]) * value / self.num2
return str(tick)[:5]
def format_func_y(value, tick_number):
tick = self.r1[0] + (self.r1[1]-self.r1[0]) * value / self.num1
return str(tick)[:5]
setattr(potential, 'verbose', verbose)
print('Start calculating closed surfaces. \n This may take a while ...')
matrix = potential.two_parameter_variation_stability_test(name1,
rang1,
name2,
rang2,
num1=num1,
num2=num2)
print('Calculated Matrix.')
print('Plotting ...')
ax = self.figure.gca()
cmap = pl.get_cmap('autumn')
ax.imshow(matrix, cmap=cmap)
# just for labels
ax.plot(0, 0, label='numerically computated closed surfaces', c='red')
ax.xaxis.set_major_formatter(pl.FuncFormatter(format_func_x))
ax.yaxis.set_major_formatter(pl.FuncFormatter(format_func_y))
ax.axvline(self.num2/2, c='black')
#ax.axhline(self.num1/2, c='black')
ax.set_ylim(0, self.num1)
ax.set_xlim(0, self.num2)
ax.set_ylabel(self.n1)
ax.set_xlabel(self.n2)
if self.save:
pl.savefig(self.save)
else:
#pl.show()
return ax
| 29.792453
| 80
| 0.5019
|
af27c84c170c7f47430d9c4a6aa297393180685a
| 1,759
|
py
|
Python
|
tests/apply/test__line_matches_pattern.py
|
tombaker/mklists
|
1a4150d5cc2df81604fbfbb2dbad2bd74d405a5f
|
[
"MIT"
] | 1
|
2018-07-25T13:22:31.000Z
|
2018-07-25T13:22:31.000Z
|
tests/apply/test__line_matches_pattern.py
|
tombaker/mklists
|
1a4150d5cc2df81604fbfbb2dbad2bd74d405a5f
|
[
"MIT"
] | 8
|
2015-03-14T06:40:24.000Z
|
2019-09-04T11:40:22.000Z
|
tests/apply/test__line_matches_pattern.py
|
tombaker/mklists
|
1a4150d5cc2df81604fbfbb2dbad2bd74d405a5f
|
[
"MIT"
] | null | null | null |
"""Returns True if line (or part of line) matches a given regular expression."""
from mklists.apply import _line_matches_pattern
def test_match():
"""Match simple regex in field 1."""
pattern = "NOW"
field_int = 1
line = "NOW Buy milk"
assert _line_matches_pattern(pattern, field_int, line)
def test_no_match():
"""No match to simple regex in field 1."""
pattern = "NOW"
field_int = 1
line = "LATER Buy milk"
assert not _line_matches_pattern(pattern, field_int, line)
def test_match_despite_leading_whitespace():
"""Match, despite leading whitespace."""
pattern = "NOW"
field_int = 1
line = " NOW Buy milk"
assert _line_matches_pattern(pattern, field_int, line)
def test_match_despite_leading_whitespace_with_caret():
"""Match, despite leading whitespace, which is lost to .split()."""
pattern = "^NOW"
field_int = 1
line = " NOW Buy milk"
assert _line_matches_pattern(pattern, field_int, line)
def test_match_start_of_entire_line():
"""Match, because regex matches start of entire line."""
pattern = "^NOW"
field_int = 0
line = "NOW Buy milk"
assert _line_matches_pattern(pattern, field_int, line)
def test_match_when_parenthesis_properly_escaped():
"""Match, because open paren is properly escaped."""
# pylint: disable=anomalous-backslash-in-string
pattern = "^N\(OW"
field_int = 0
line = "N(OW Buy milk"
assert _line_matches_pattern(pattern, field_int, line)
def test_no_match_when_line_has_less_fields_than_source_matchfield():
"""No match, because line has less than six fields."""
pattern = "^NOW"
field_int = 6
line = "NOW Buy milk"
assert not _line_matches_pattern(pattern, field_int, line)
| 28.836066
| 80
| 0.697555
|
9c6dc280252899fac307af96f70bc3cf52e97712
| 4,470
|
py
|
Python
|
sagemaker-python-sdk/mxnet_onnx_export/mnist.py
|
zanhsieh/amazon-sagemaker-examples
|
a09b5aa61c27b21459ef9732288ff05aac45cf24
|
[
"Apache-2.0"
] | 4
|
2018-12-03T08:14:15.000Z
|
2019-01-25T04:06:20.000Z
|
sagemaker-python-sdk/mxnet_onnx_export/mnist.py
|
zanhsieh/amazon-sagemaker-examples
|
a09b5aa61c27b21459ef9732288ff05aac45cf24
|
[
"Apache-2.0"
] | 1
|
2019-04-10T20:21:18.000Z
|
2019-04-10T20:21:18.000Z
|
sagemaker-python-sdk/mxnet_onnx_export/mnist.py
|
zanhsieh/amazon-sagemaker-examples
|
a09b5aa61c27b21459ef9732288ff05aac45cf24
|
[
"Apache-2.0"
] | 2
|
2019-07-09T18:32:20.000Z
|
2020-09-11T19:07:55.000Z
|
import argparse
import gzip
import json
import logging
import os
import tempfile
import shutil
import struct
import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet
import numpy as np
from sagemaker_mxnet_container.training_utils import scheduler_host
def load_data(path):
with gzip.open(find_file(path, "labels.gz")) as flbl:
struct.unpack(">II", flbl.read(8))
labels = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(find_file(path, "images.gz")) as fimg:
_, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
images = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(labels), rows, cols)
images = images.reshape(images.shape[0], 1, 28, 28).astype(np.float32) / 255
return labels, images
def find_file(root_path, file_name):
for root, dirs, files in os.walk(root_path):
if file_name in files:
return os.path.join(root, file_name)
def build_graph():
data = mx.sym.var('data')
data = mx.sym.flatten(data=data)
fc1 = mx.sym.FullyConnected(data=data, num_hidden=128)
act1 = mx.sym.Activation(data=fc1, act_type="relu")
fc2 = mx.sym.FullyConnected(data=act1, num_hidden=64)
act2 = mx.sym.Activation(data=fc2, act_type="relu")
fc3 = mx.sym.FullyConnected(data=act2, num_hidden=10)
return mx.sym.SoftmaxOutput(data=fc3, name='softmax')
def get_training_context(num_gpus):
if num_gpus:
return [mx.gpu(i) for i in range(num_gpus)]
else:
return mx.cpu()
def train(batch_size, epochs, learning_rate, num_gpus, training_channel, testing_channel,
hosts, current_host, model_dir):
(train_labels, train_images) = load_data(training_channel)
(test_labels, test_images) = load_data(testing_channel)
# Data parallel training - shard the data so each host
# only trains on a subset of the total data.
shard_size = len(train_images) // len(hosts)
for i, host in enumerate(hosts):
if host == current_host:
start = shard_size * i
end = start + shard_size
break
train_iter = mx.io.NDArrayIter(train_images[start:end], train_labels[start:end], batch_size,
shuffle=True)
val_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)
logging.getLogger().setLevel(logging.DEBUG)
kvstore = 'local' if len(hosts) == 1 else 'dist_sync'
mlp_model = mx.mod.Module(symbol=build_graph(),
context=get_training_context(num_gpus))
mlp_model.fit(train_iter,
eval_data=val_iter,
kvstore=kvstore,
optimizer='sgd',
optimizer_params={'learning_rate': learning_rate},
eval_metric='acc',
batch_end_callback=mx.callback.Speedometer(batch_size, 100),
num_epoch=epochs)
if current_host == scheduler_host(hosts):
save(model_dir, mlp_model)
def save(model_dir, model):
tmp_dir = tempfile.mkdtemp()
symbol_file = os.path.join(tmp_dir, 'model-symbol.json')
params_file = os.path.join(tmp_dir, 'model-0000.params')
model.symbol.save(symbol_file)
model.save_params(params_file)
data_shapes = [[dim for dim in data_desc.shape] for data_desc in model.data_shapes]
output_path = os.path.join(model_dir, 'model.onnx')
onnx_mxnet.export_model(symbol_file, params_file, data_shapes, np.float32, output_path)
shutil.rmtree(tmp_dir)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=100)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--learning-rate', type=float, default=0.1)
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--test', type=str, default=os.environ['SM_CHANNEL_TEST'])
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
num_gpus = int(os.environ['SM_NUM_GPUS'])
train(args.batch_size, args.epochs, args.learning_rate, num_gpus, args.train, args.test,
args.hosts, args.current_host, args.model_dir)
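# --- illustrative local invocation (added sketch, not part of the original script) ---
# Outside SageMaker the environment variables read above have to be supplied by
# hand; the directory values below are placeholders, not defaults from the source.
#
#   SM_MODEL_DIR=/tmp/model SM_CHANNEL_TRAIN=/tmp/data/train SM_CHANNEL_TEST=/tmp/data/test \
#   SM_CURRENT_HOST=algo-1 SM_HOSTS='["algo-1"]' SM_NUM_GPUS=0 \
#   python mnist.py --epochs 2 --batch-size 100
#
# Each channel directory must contain the gzipped "images.gz" and "labels.gz"
# files that load_data() locates via find_file().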
| 34.651163
| 96
| 0.675615
|
ba046e33bad76145aeea6022de0a045a913b35d6
| 8,044
|
py
|
Python
|
Constrained_GaussianProcess/_HmcSampler.py
|
liaowangh/constrained_gp
|
6bb2b68a29d20a915ab52e7b8feb2fdac81b755e
|
[
"MIT"
] | 1
|
2020-04-12T14:12:16.000Z
|
2020-04-12T14:12:16.000Z
|
build/lib/Constrained_GaussianProcess/_HmcSampler.py
|
liaowangh/constrained_gp
|
6bb2b68a29d20a915ab52e7b8feb2fdac81b755e
|
[
"MIT"
] | null | null | null |
build/lib/Constrained_GaussianProcess/_HmcSampler.py
|
liaowangh/constrained_gp
|
6bb2b68a29d20a915ab52e7b8feb2fdac81b755e
|
[
"MIT"
] | null | null | null |
# Python implementation of "Exact Hamiltonian Monte Carlo for Truncated Multivariate Gaussian"
# this script is written based on https://cran.r-project.org/web/packages/tmg/index.html
# Author: Liaowang Huang <liahuang@student.ethz.ch>
import numpy as np
import scipy.linalg
class HmcSampler:
min_t = 0.00001
def __init__(self, dim, init, f, g, verbose):
"""
:param dim: dimension
:param init: (dim, ), the initial value for HMC
:param f: (q, dim), coefficient for linear constraints
:param g: (q,), linear constraints: f*X+g >= 0
"""
self.dim = dim
self.lastSample = init
self.f = f
self.g = g
self.verbose = verbose
def getNextLinearHitTime(self, a, b):
"""
the position x(t) = a * sin(t) + b * cos(t)
:param a: (dim, ) initial value for a (initial velocity)
:param b: (dim, ) initial value for b (initial position)
:return: hit_time: the time for the hit
cn : the cn-th constraint is active at hit time.
"""
hit_time = 0
cn = 0
if self.f is None:
return hit_time, cn
f = self.f
g = self.g
for i in range(f.shape[0]):
# constraints: f[i].dot(x)+g[i] >= 0
fa = f[i].dot(a)
fb = f[i].dot(b)
u = np.sqrt(fa*fa + fb*fb)
# if u > g[i] and u > -g[i]:
if -u < g[i] < u:
# otherwise the constraint will always be satisfied
phi = np.arctan2(-fa, fb) # -pi < phi < pi
t1 = np.arccos(-g[i]/u) - phi # -pi < t1 < 2*pi
if t1 < 0:
t1 += 2 * np.pi # 0 < t1 < 2*pi
if np.abs(t1) < self.min_t or \
np.abs(t1-2*np.pi) < self.min_t:
t1 = 0
t2 = -t1 - 2*phi # -4*pi < t2 < 2*pi
if t2 < 0:
t2 += 2*np.pi # -2*pi < t2 < 2*pi
if t2 < 0:
t2 += 2*np.pi # 0 < t2 < 2*pi
if np.abs(t2) < self.min_t or \
np.abs(t2 - 2 * np.pi) < self.min_t:
t2 = 0
if t1 == 0:
t = t2
elif t2 == 0:
t = t1
else:
t = np.minimum(t1, t2)
if self.min_t < t and (hit_time == 0 or t < hit_time):
hit_time = t
cn = i
return hit_time, cn
def verifyConstraints(self, b):
"""
:param b:
:return:
"""
if self.f is not None:
return np.min(self.f@b + self.g)
else:
return 1
def sampleNext(self):
T = np.pi / 2 # how much time to move
b = self.lastSample
dim = self.dim
count_sample_vel = 0
while True:
velsign = 0
# sample new initial velocity
a = np.random.normal(0, 1, dim)
count_sample_vel += 1
if self.verbose and count_sample_vel % 50 == 0:
print("Has sampled %d times of initial velocity." % count_sample_vel)
tt = T # the time left to move
while True:
t, c1 = self.getNextLinearHitTime(a, b)
# t: how much time to move to hit the boundary, if t == 0, move tt
# c1: the strict constraint at hit time
if t == 0 or tt < t:
# if no wall to be hit (t == 0) or not enough
# time left to hit the wall (tt < t)
break
tt -= t # time left to move after hitting the wall
new_b = np.sin(t) * a + np.cos(t) * b # hit location
hit_vel = np.cos(t) * a - np.sin(t) * b # hit velocity
b = new_b
# reflect the velocity and verify that it points in the right direction
f2 = np.dot(self.f[c1], self.f[c1])
alpha = np.dot(self.f[c1], hit_vel) / f2
a = hit_vel - 2*alpha*self.f[c1] # reflected velocity
velsign = a.dot(self.f[c1])
if velsign < 0:
# get out of inner while, resample the velocity and start again
# this occurs rarelly, due to numerical instabilities
break
if velsign < 0:
# go to the beginning of outer while
continue
bb = np.sin(tt) * a + np.cos(tt) * b
check = self.verifyConstraints(bb)
if check >= 0:
# verify that we don't violate the constraints
# due to a numerical instability
if self.verbose:
print("total number of velocity samples : %d" % count_sample_vel)
self.lastSample = bb
return bb
def tmg(n, mu, M, initial, f=None, g=None, burn_in=30, verbose=False):
"""
This function generates samples from a Markov chain whose equilibrium distribution is a d-dimensional
multivariate Gaussian truncated by linear inequalities. The log probability density is
log p(X) = -0.5 (X-mu)^T M^-1 (X-mu) + const
in terms of a covariance matrix M and a mean vector mu. The constraints are imposed as explained below.
The Markov chain is built using the Hamiltonian Monte Carlo technique.
:param n: Number of samples.
:param mu: (m,) vector for the mean of multivariate Gaussian density
:param M: (m,m) covariance matrix of the multivariate Gaussian density
:param initial: (m,) vector with the initial value of the Markov chain. Must satisfy
the truncation inequalities strictly.
:param f: (q,m) matrix, where q is the number of linear constraints. The constraints require each component
of the m-dimensional vector fX+g to be non-negative
:param g: (q,) vector with the constant terms in the above linear constraints.
:param burn_in: The number of burn-in iterations. The Markov chain is sampled n + burn_in
times, and the last n samples are returned.
:param verbose:
:return: (n, m)
"""
dim = len(mu)
if M.shape[1] != dim:
raise ValueError("The covariance matrix must be square.")
if len(initial) != dim:
raise ValueError("Wrong length for initial value vector.")
# verify that M is positive definite, it will raise an error if M is not SPD
R = np.linalg.cholesky(M)
# we change variable to the canonical frame, and transform back after sampling
# X ~ N(mu, M), then R^-1(X-mu) ~ N(0, I)
init_trans = scipy.linalg.solve(R, initial - mu) # the new initial value
if f is not None:
if f.shape[0] != len(g) or f.shape[1] != dim:
raise ValueError("Inconsistent linear constraints. f must \
be an d-by-m matrix and g an d-dimensional vector.")
# g may contains infinity, extract valid constraints
valid = np.logical_and(g < np.inf, g > -np.inf)
g = g[valid]
f = f[valid]
# verify initial value satisfies linear constraints
if np.any(f@initial+g < 0):
raise ValueError("Initial point violates linear constraints.")
# map linear constraints to canonical frame
f_trans = f@R
g_trans = f@mu+g
hmc = HmcSampler(dim, init_trans, f_trans, g_trans, verbose=verbose)
else:
hmc = HmcSampler(dim, init_trans, f, g, verbose=verbose)
samples = np.zeros((n, dim))
for i in range(burn_in):
if verbose:
print("="*30 + " (burn in) sample {} ".format(i) + "="*30)
hmc.sampleNext()
for i in range(n):
if verbose:
print("=" * 30 + " sample {} ".format(i) + "=" * 30)
samples[i] = hmc.sampleNext()
# transform back
return samples @ R.T + mu
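# --- illustrative usage sketch (added example, not part of the original module) ---
# Draw samples from a 2-D standard normal truncated to the positive quadrant,
# i.e. the constraints x >= 0 and y >= 0 written as f @ X + g >= 0 with f = I
# and g = 0; the initial point must satisfy them strictly.
if __name__ == "__main__":
    mu0 = np.zeros(2)
    cov0 = np.eye(2)
    f0 = np.eye(2)
    g0 = np.zeros(2)
    x0 = np.array([1.0, 1.0])
    draws = tmg(200, mu0, cov0, x0, f=f0, g=g0, burn_in=30)
    # every sample stays inside the truncation region
    print(draws.min(), draws.mean(axis=0))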
| 36.234234
| 117
| 0.522626
|
f41ffcd0aeab13b18263919188e4696d0d2e804a
| 9,286
|
py
|
Python
|
Script_Extraction_API/protoype quadrillage recussifs/quadrillage_recurssif.py
|
CazabetLyon1/lyon_resto_2018_Autumn
|
2d9f495b1fdcea3e9f9e0945baf4d49f24b17de4
|
[
"DOC"
] | 1
|
2020-10-25T08:21:19.000Z
|
2020-10-25T08:21:19.000Z
|
Script_Extraction_API/protoype quadrillage recussifs/quadrillage_recurssif.py
|
CazabetLyon1/lyon_resto_2018_Autumn
|
2d9f495b1fdcea3e9f9e0945baf4d49f24b17de4
|
[
"DOC"
] | null | null | null |
Script_Extraction_API/protoype quadrillage recussifs/quadrillage_recurssif.py
|
CazabetLyon1/lyon_resto_2018_Autumn
|
2d9f495b1fdcea3e9f9e0945baf4d49f24b17de4
|
[
"DOC"
] | null | null | null |
from math import *
import copy
import json
import time
from geopy import distance
import matplotlib.pyplot as plt
import numpy as np
# global list holding the coordinates and the associated radius of every call that must be made to cover the same area as the legacy json provided
l=[]
NB_MAX_RES = 50
# function that reads the json; returns a list of dictionaries, with keys ["lat"] ["lng"] ["nom"] for each entry
def lect_json (nom_fich):
with open(nom_fich,"r", encoding="utf8") as f:
restaurants=json.load(f)
print(type(restaurants))
liste=[]
for resto in restaurants:
dict = {}
dict["lat"]=resto["geometry"]["location"]["lat"]
dict["lng"]=resto["geometry"]["location"]["lng"]
dict["nom"]=resto["name"]
liste.append(dict)
return liste
def calc_min_max_2 (liste):
""" recherche du min et du max pour les points afin de générer le point en haut a gauche,a droite en bas a gauche et droite """
dict= {}
for resto in liste:
if "latmax" not in dict:
dict["lngmax"]=resto.copy()
dict["lngmin"]=resto.copy()
dict["latmax"]=resto.copy()
dict["latmin"]=resto.copy()
if ((float)(resto["lng"])>dict["lngmax"]["lng"]):
dict["lngmax"]=resto.copy()
if((float)(resto["lng"])<dict["lngmin"]["lng"]):
dict["lngmin"]=resto.copy()
if ((float)(resto["lat"])>dict["latmax"]["lat"]):
dict["latmax"]=resto.copy()
if ((float)(resto["lat"])<dict["latmin"]["lat"]):
dict["latmin"]=resto.copy()
dict_point={}
dict_point["haut_gauche"]={}
dict_point["haut_gauche"]["lat"]=dict["latmax"]["lat"] # ATTENTION : lng va du - vers le plus de gauche a droite , et lat va du - vers le + de haut en bas
dict_point["haut_gauche"]["lng"]=dict["lngmin"]["lng"]
dict_point["bas_gauche"]={}
dict_point["bas_gauche"]["lat"]=dict["latmin"]["lat"]
dict_point["bas_gauche"]["lng"]=dict["lngmin"]["lng"]
dict_point["haut_droite"]={}
dict_point["haut_droite"]["lat"]=dict["latmax"]["lat"]
dict_point["haut_droite"]["lng"]=dict["lngmax"]["lng"]
dict_point["bas_droite"]={}
dict_point["bas_droite"]["lat"]=dict["latmin"]["lat"]
dict_point["bas_droite"]["lng"]=dict["lngmax"]["lng"]
print("haut_gauche: ",dict_point["haut_gauche"]["lat"],dict_point["haut_gauche"]["lng"],"haut_droite : ",dict_point["haut_droite"]["lat"],dict_point["haut_droite"]["lng"],"bas_gauche :" ,dict_point["bas_gauche"]["lat"],dict_point["bas_gauche"]["lng"],"bas_droite :",dict_point["bas_droite"]["lat"],dict_point["bas_droite"]["lng"])
return dict_point
#distance between two points: uses the geopy library; the coordinates given to the function are of the (lng , lat) form
def dist(x1,y1,x2,y2) :
#res=sqrt(pow(y2-y1,2)+pow(x2-x1,2))
couple1 =(y1,x1) # geopy's distance expects (latitude, longitude); callers pass (lng, lat), so reorder here
couple2 =(y2,x2)
res=(distance.distance(couple1, couple2).km)
return res
#function that, given a point, a radius and a list of restaurants, returns the number of points inside the circle of radius radius centred on the given point
def nb_res_appel(x,y,radius,liste):
compteur=0
for res in liste:
resultat=dist(x,y,res["lng"],res["lat"])
if resultat<=radius:
compteur+=1
return compteur
def decoupage(x,y,radius,radius_reel,liste_resto):
""" Focntion appelé au cas ou il y aurait plus de 60 restaurant (contenue dans lite_resto) dans le cercle de centre (x,y) de rayon radius, découpe ce cercle en plusieurs cercle plus petit sur lesquels on va faire des appels, le but étant que dans chaque cercle il y ai moins de 60 restaurants"""
#code pas propre : faire une fonction pour le contenue qui se repete
new_x=x+radius /2
new_y=y+radius /(2)
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x-radius /2
new_y=y+radius /(2)
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x+radius /2
new_y=y-radius /(2)
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x-radius /2
new_y=y-radius /(2)
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x
new_y=y
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x+radius
new_y=y
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x
new_y=y+radius
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x-radius
new_y=y
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
new_x=x
new_y=y-radius
nb_res=nb_res_appel(new_x,new_y,radius_reel/2,liste_resto)
if nb_res > 50 :
#print("appel rec en plus")
decoupage(new_x,new_y,radius/2,radius_reel/2,liste_resto)
else :
if nb_res != 0:
dict={}
dict["lng"]=new_x
dict["lat"]=new_y
dict["rr"]=radius_reel/2
dict["r"]=radius/2
l.append(dict)
return
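# Note added for clarity (not in the original source): decoupage() covers the
# oversized circle of radius `radius` with nine half-radius circles -- four
# centres shifted by +/-radius/2 on both axes, one at the original centre and
# four shifted by +/-radius along a single axis; any sub-circle still holding
# more than 50 restaurants is split again recursively.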
def shrek(nb=5,fichier_res='liste_point_genere.txt',ancien_fichier="C:/Users/franc/OneDrive/Documents/FAC/LIFPROJET/LIFPROJET/JSON/restaurants.json"):
""" fonction ultime qui genère un fichier contenant les points ou faire nos appels """
liste_resto=[]
liste_resto=copy.deepcopy(lect_json(ancien_fichier)) #reference json file containing all the previously collected restaurants
dict= calc_min_max_2(liste_resto)
radius=((dict["haut_droite"]["lng"]-dict["haut_gauche"]["lng"])) #on a besoin du radius en terme distance entre des points de type (lng , lat) afin de faire evoluer i et j , qui seront utiliser comme coordonnées de type (lng , lat) pour faire nos appels fictifs,décoper en cercle plus petit ect
radius=radius/nb
print(radius)
nb_ligne=(int)((dict["haut_gauche"]["lat"]-dict["bas_gauche"]["lat"])/radius) #adapt the number of rows on which the calls are made so that the whole area is properly covered by the grid (see the optional illustration for a clearer picture)
nb_ligne=(nb_ligne+1)*2
#compute the radius as a real-world distance:
radius_reel=dist(dict["haut_gauche"]["lng"],dict["haut_gauche"]["lat"],dict["haut_gauche"]["lng"]+radius,dict["haut_gauche"]["lat"])# needed to decide whether a restaurant lies inside a circle, since the distance between the restaurant and the circle centre is expressed in this unit
print(radius_reel)
for i in range(nb_ligne+1):
for j in range(nb+1) :
if i%2==0 :
x=dict["haut_gauche"]["lng"]+ 2*j*radius - radius
y=dict["haut_gauche"]["lat"]- i * radius
print("----")
if i%2==1 :
x=dict["haut_gauche"]["lng"]+ j*radius*2 + radius -radius
y=y=dict["haut_gauche"]["lat"]- i * radius
print("--")
nb_res=nb_res_appel(x,y,radius_reel,liste_resto)
if nb_res>50:
decoupage(x,y,radius,radius_reel,liste_resto)
else :
if nb_res != 0:
dict_res={}
dict_res["lng"]=x
dict_res["lat"]=y
dict_res["rr"]=radius_reel
dict_res["r"]=radius
l.append(dict_res)
print ("fini :)\n")
with open(fichier_res, 'w') as f:
f.write(json.dumps(l, indent=4))
print("Fini : nb points = ",len(l))
fig, ax=plt.subplots()
for d in l :
C=plt.Circle((d["lng"],d["lat"]),d["r"])
ax.add_artist(C)
print(d["lng"])
print(d["lat"])
print(d["r"])
ax.set_xlim((dict["haut_gauche"]["lng"]-0.01,dict["haut_droite"]["lng"]+0.01))
ax.set_ylim((dict["bas_gauche"]["lat"]-0.01,dict["haut_gauche"]["lat"]+0.01))
plt.show()
shrek(5,'liste_point_pour_restaurants_071218_final.txt',"C:/Users/franc/OneDrive/Documents/FAC/LIFPROJET/LIFPROJET/JSON/restaurants.json")
| 32.582456
| 331
| 0.682317
|
a630ae6734cb1d4117414dc82bad96e8114488a2
| 412
|
py
|
Python
|
popularity.py
|
MysteriousBaboon/Recommendation-system-books
|
24bb3a806238c61a5038de258dd51ad5f8c79adf
|
[
"MIT"
] | null | null | null |
popularity.py
|
MysteriousBaboon/Recommendation-system-books
|
24bb3a806238c61a5038de258dd51ad5f8c79adf
|
[
"MIT"
] | null | null | null |
popularity.py
|
MysteriousBaboon/Recommendation-system-books
|
24bb3a806238c61a5038de258dd51ad5f8c79adf
|
[
"MIT"
] | null | null | null |
import pandas as pd
def generate_recommendation(connection, number_recommendation):
df = pd.read_sql(
f"SELECT books.average_rating, books.authors, books.original_publication_year, books.language_code, "
f"books.title, books.image_url FROM books",
connection)
df = df.sort_values(by=['average_rating'], ascending=False)
df = df.iloc[:number_recommendation]
return df
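# --- illustrative usage sketch (added example, not part of the original module) ---
# generate_recommendation() only needs a connection pandas can query and a
# "books" table exposing the columns named above; the sqlite file name here is
# a placeholder assumption.
#
#   import sqlite3
#   conn = sqlite3.connect("books.db")
#   top10 = generate_recommendation(conn, 10)
#   print(top10[["title", "average_rating"]])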
| 27.466667
| 109
| 0.720874
|
6378675af973696cc52cbbc0e9dd8d93d2ae78f5
| 510
|
py
|
Python
|
apex/testing/common_utils.py
|
Mahathi-Vatsal/apex
|
063d720f1a41f1b5437f0cf7cbbc5e4a81392538
|
[
"BSD-3-Clause"
] | 6
|
2020-06-01T17:27:13.000Z
|
2022-01-10T08:59:50.000Z
|
apex/testing/common_utils.py
|
Mahathi-Vatsal/apex
|
063d720f1a41f1b5437f0cf7cbbc5e4a81392538
|
[
"BSD-3-Clause"
] | 43
|
2020-04-28T17:09:02.000Z
|
2022-03-31T18:10:01.000Z
|
apex/testing/common_utils.py
|
Mahathi-Vatsal/apex
|
063d720f1a41f1b5437f0cf7cbbc5e4a81392538
|
[
"BSD-3-Clause"
] | 9
|
2020-05-14T18:41:24.000Z
|
2022-03-30T00:09:42.000Z
|
'''
This file contains common utility functions for running the unit tests on ROCM.
'''
import torch
import os
import sys
from functools import wraps
import unittest
TEST_WITH_ROCM = os.getenv('APEX_TEST_WITH_ROCM', '0') == '1'
## Wrapper to skip the unit tests.
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on ROCm stack.")
else:
fn(*args, **kwargs)
return wrapper
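# --- illustrative usage (added example, not part of the original module) ---
# The decorator wraps individual unittest methods; with APEX_TEST_WITH_ROCM=1
# the wrapped test raises unittest.SkipTest instead of running. The test class
# below is hypothetical.
#
#   class FusedLayerNormTest(unittest.TestCase):
#       @skipIfRocm
#       def test_not_supported_on_rocm_yet(self):
#           self.assertTrue(True)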
| 22.173913
| 81
| 0.670588
|
b2aa6c5525e98198b0bbaf4c59efa99d9e3c1d02
| 13,809
|
py
|
Python
|
util/design/lib/LcStEnc.py
|
mdhayter/opentitan
|
78964d790429c086d131e8a71081266b3b044dcf
|
[
"Apache-2.0"
] | null | null | null |
util/design/lib/LcStEnc.py
|
mdhayter/opentitan
|
78964d790429c086d131e8a71081266b3b044dcf
|
[
"Apache-2.0"
] | null | null | null |
util/design/lib/LcStEnc.py
|
mdhayter/opentitan
|
78964d790429c086d131e8a71081266b3b044dcf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Contains life cycle state encoding class which is
used to generate new life cycle encodings.
"""
import logging as log
import random
from collections import OrderedDict
from Crypto.Hash import cSHAKE128
from lib.common import (check_int, ecc_encode, get_hd, hd_histogram,
is_valid_codeword, random_or_hexvalue, scatter_bits)
# Seed diversification constant for LcStEnc (this enables to use
# the same seed for different classes)
LC_SEED_DIVERSIFIER = 1939944205722120255
# State types and permissible format for entries
# The format is index dependent, e.g. ['0', 'A1', 'B1'] for index 1
LC_STATE_TYPES = {
'lc_state': ['0', 'A{}', 'B{}'],
'lc_cnt': ['0', 'C{}', 'D{}'],
'lc_id_state': ['0', 'E{}', 'F{}']
}
def _is_incremental_codeword(word1, word2):
'''Test whether word2 is incremental wrt word1.'''
if len(word1) != len(word2):
raise RuntimeError('Words are not of equal size')
_word1 = int(word1, 2)
_word2 = int(word2, 2)
# This basically checks that the second word does not
# clear any bits that are set to 1 in the first word.
return ((_word1 & _word2) == _word1)
def _get_incremental_codewords(config, base_ecc, existing_words):
'''Get all possible incremental codewords fulfilling the constraints.'''
base_data = base_ecc[config['secded']['ecc_width']:]
# We only need to spin through data bits that have not been set yet.
# Hence, we first count how many bits are zero (and hence still
# modifyable). Then, we enumerate all possible combinations and scatter
# the bits of the enumerated values into the correct bit positions using
# the scatter_bits() function.
incr_cands = []
free_bits = base_data.count('0')
for k in range(1, 2**free_bits):
# Get incremental dataword by scattering the enumeration bits
# into the zero bit positions in base_data.
incr_cand = scatter_bits(base_data,
format(k, '0' + str(free_bits) + 'b'))
incr_cand_ecc = ecc_encode(config, incr_cand)
# Dataword is correct by construction, but we need to check whether
# the ECC bits are incremental.
if _is_incremental_codeword(base_ecc, incr_cand_ecc):
# Check whether the candidate fulfills the maximum
# Hamming weight constraint.
if incr_cand_ecc.count('1') <= config['max_hw']:
# Check Hamming distance wrt all existing words.
for w in existing_words + [base_ecc]:
if get_hd(incr_cand_ecc, w) < config['min_hd']:
break
else:
incr_cands.append(incr_cand_ecc)
return incr_cands
def _get_new_state_word_pair(config, existing_words):
'''Randomly generate a new incrementally writable word pair'''
while 1:
# Draw a random number and check whether it is unique and whether
# the Hamming weight is in range.
width = config['secded']['data_width']
ecc_width = config['secded']['ecc_width']
base = random.getrandbits(width)
base = format(base, '0' + str(width) + 'b')
base_cand_ecc = ecc_encode(config, base)
# disallow all-zero and all-one states
pop_cnt = base_cand_ecc.count('1')
if pop_cnt >= config['min_hw'] and pop_cnt <= config['max_hw']:
# Check Hamming distance wrt all existing words
for w in existing_words:
if get_hd(base_cand_ecc, w) < config['min_hd']:
break
else:
# Get encoded incremental candidates.
incr_cands_ecc = _get_incremental_codewords(
config, base_cand_ecc, existing_words)
                # If there are valid candidates, draw one at random;
                # otherwise we just start over.
if incr_cands_ecc:
incr_cand_ecc = random.choice(incr_cands_ecc)
log.info('word {}: {}|{} -> {}|{}'.format(
int(len(existing_words) / 2),
base_cand_ecc[ecc_width:], base_cand_ecc[0:ecc_width],
incr_cand_ecc[ecc_width:], incr_cand_ecc[0:ecc_width]))
existing_words.append(base_cand_ecc)
existing_words.append(incr_cand_ecc)
return (base_cand_ecc, incr_cand_ecc)
def _validate_words(config, words):
'''Validate generated words (base and incremental).'''
for k, w in enumerate(words):
        # Check whether the word is valid wrt the ECC polynomial.
if not is_valid_codeword(config, w):
raise RuntimeError('Codeword {} at index {} is not valid'.format(
w, k))
# Check that word fulfills the Hamming weight constraints.
pop_cnt = w.count('1')
if pop_cnt < config['min_hw'] or pop_cnt > config['max_hw']:
raise RuntimeError(
'Codeword {} at index {} has wrong Hamming weight'.format(
w, k))
        # Check Hamming distance wrt all other existing words.
# If the constraint is larger than 0 this implies uniqueness.
if k < len(words) - 1:
for k2, w2 in enumerate(words[k + 1:]):
if get_hd(w, w2) < config['min_hd']:
raise RuntimeError(
'Hamming distance between codeword {} at index {} '
'and codeword {} at index {} is too low.'.format(
w, k, w2, k + 1 + k2))
def _validate_secded(config):
'''Validate SECDED configuration'''
config['secded'].setdefault('data_width', 0)
config['secded'].setdefault('ecc_width', 0)
config['secded'].setdefault('ecc_matrix', [[]])
config['secded']['data_width'] = check_int(config['secded']['data_width'])
config['secded']['ecc_width'] = check_int(config['secded']['ecc_width'])
total_width = config['secded']['data_width'] + config['secded']['ecc_width']
if config['secded']['data_width'] % 8:
raise RuntimeError('SECDED data width must be a multiple of 8')
if config['secded']['ecc_width'] != len(config['secded']['ecc_matrix']):
raise RuntimeError('ECC matrix does not have correct number of rows')
log.info('SECDED Matrix:')
for i, l in enumerate(config['secded']['ecc_matrix']):
log.info('ECC Bit {} Fanin: {}'.format(i, l))
for j, e in enumerate(l):
e = check_int(e)
if e < 0 or e >= total_width:
raise RuntimeError('ECC bit position is out of bounds')
config['secded']['ecc_matrix'][i][j] = e
def _validate_constraints(config):
'''Validates Hamming weight and distance constraints'''
config.setdefault('min_hw', 0)
config.setdefault('max_hw', 0)
config.setdefault('min_hd', 0)
config['min_hw'] = check_int(config['min_hw'])
config['max_hw'] = check_int(config['max_hw'])
config['min_hd'] = check_int(config['min_hd'])
total_width = config['secded']['data_width'] + config['secded']['ecc_width']
if config['min_hw'] >= total_width or \
config['max_hw'] > total_width or \
config['min_hw'] >= config['max_hw']:
raise RuntimeError('Hamming weight constraints are inconsistent.')
if config['max_hw'] - config['min_hw'] + 1 < config['min_hd']:
raise RuntimeError('Hamming distance constraint is inconsistent.')
def _validate_tokens(config):
'''Validates and hashes the tokens'''
config.setdefault('token_size', 128)
config['token_size'] = check_int(config['token_size'])
# This needs to be byte aligned
if config['token_size'] % 8:
        raise ValueError('Token size {} must be byte aligned'
                         .format(config['token_size']))
num_bytes = config['token_size'] // 8
hashed_tokens = []
for token in config['tokens']:
random_or_hexvalue(token, 'value', config['token_size'])
hashed_token = OrderedDict()
hashed_token['name'] = token['name'] + 'Hashed'
data = token['value'].to_bytes(num_bytes, byteorder='little')
# Custom string chosen for life cycle KMAC App interface
custom = 'LC_CTRL'.encode('UTF-8')
hashobj = cSHAKE128.new(data=data, custom=custom)
hashed_token['value'] = int.from_bytes(hashobj.read(num_bytes),
byteorder='little')
hashed_tokens.append(hashed_token)
config['tokens'] += hashed_tokens
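# Illustrative helper (not part of the original module): mirrors the hashing
# loop above for a single raw token value. The function name is an assumption;
# token_size defaults to the 128-bit value set in _validate_tokens.
def _example_hash_token(value, token_size=128):
    num_bytes = token_size // 8
    data = value.to_bytes(num_bytes, byteorder='little')
    hashobj = cSHAKE128.new(data=data, custom='LC_CTRL'.encode('UTF-8'))
    return int.from_bytes(hashobj.read(num_bytes), byteorder='little')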
def _validate_state_declarations(config):
'''Validates life cycle state and counter declarations'''
for typ in LC_STATE_TYPES.keys():
for k, state in enumerate(config[typ].keys()):
if k == 0:
config['num_' + typ + '_words'] = len(config[typ][state])
log.info('Inferred {} = {}'.format(
'num_' + typ + '_words', config['num_' + typ + '_words']))
if config['num_' + typ + '_words'] != len(config[typ][state]):
raise RuntimeError(
'{} entry {} has incorrect length {}'.format(
typ, state, len(config[typ][state])))
# Render the format templates above.
for j, entry in enumerate(config[typ][state]):
legal_values = [fmt.format(j) for fmt in LC_STATE_TYPES[typ]]
if entry not in legal_values:
raise RuntimeError(
'Illegal entry "{}" found in {} of {}'.format(
entry, state, typ))
def _generate_words(config):
'''Generate encoding words'''
config['genwords'] = {} # dict holding the word pairs for each state type
existing_words = [] # temporary list of all words for uniqueness tests
for typ in LC_STATE_TYPES.keys():
config['genwords'][typ] = []
for k in range(config['num_' + typ + '_words']):
new_word = _get_new_state_word_pair(config, existing_words)
config['genwords'][typ].append(new_word)
# Validate words (this must not fail at this point).
_validate_words(config, existing_words)
# Calculate and store statistics
config['stats'] = hd_histogram(existing_words)
log.info('')
log.info('Hamming distance histogram:')
log.info('')
for bar in config['stats']["bars"]:
log.info(bar)
log.info('')
log.info('Minimum HD: {}'.format(config['stats']['min_hd']))
log.info('Maximum HD: {}'.format(config['stats']['max_hd']))
log.info('Minimum HW: {}'.format(config['stats']['min_hw']))
log.info('Maximum HW: {}'.format(config['stats']['max_hw']))
class LcStEnc():
'''Life cycle state encoding generator class
The constructor expects the parsed configuration
hjson to be passed in.
'''
# This holds the config dict.
config = {}
def __init__(self, config):
'''The constructor validates the configuration dict.'''
log.info('')
log.info('Generate life cycle state')
log.info('')
if 'seed' not in config:
raise RuntimeError('Missing seed in configuration')
if 'secded' not in config:
raise RuntimeError('Missing secded configuration')
if 'tokens' not in config:
raise RuntimeError('Missing token configuration')
for typ in LC_STATE_TYPES.keys():
if typ not in config:
raise RuntimeError('Missing {} definition'.format(typ))
config['seed'] = check_int(config['seed'])
log.info('Seed: {0:x}'.format(config['seed']))
log.info('')
# Re-initialize with seed to make results reproducible.
random.seed(LC_SEED_DIVERSIFIER + int(config['seed']))
log.info('Checking SECDED.')
_validate_secded(config)
log.info('')
log.info('Checking Hamming weight and distance constraints.')
_validate_constraints(config)
log.info('')
log.info('Hashing tokens.')
_validate_tokens(config)
log.info('')
log.info('Checking state declarations.')
_validate_state_declarations(config)
log.info('')
log.info('Generate incremental word encodings.')
_generate_words(config)
self.config = config
log.info('')
log.info('Successfully generated life cycle state.')
log.info('')
def encode(self, name, state):
'''Look up state encoding and return as integer value'''
data_width = self.config['secded']['data_width']
ecc_width = self.config['secded']['ecc_width']
if name not in LC_STATE_TYPES:
raise RuntimeError('Unknown state type {}'.format(name))
if state not in self.config[name]:
raise RuntimeError('Unknown state {} of type {}'.format(
state, name))
# Assemble list of state words
words = []
for j, entry in enumerate(self.config[name][state]):
# This creates an index lookup table
val_idx = {
fmt.format(j): i
for i, fmt in enumerate(LC_STATE_TYPES[name])
}
idx = val_idx[entry]
if idx == 0:
words.append(0)
else:
# Only extract data portion, discard ECC portion
word = self.config['genwords'][name][j][idx - 1][ecc_width:]
words.append(int(word, 2))
# Convert words to one value
outval = 0
for k, word in enumerate(words):
outval += word << (data_width * k)
return outval
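# Worked example of the word packing in encode() above (illustrative constants
# only, assuming data_width = 16): the k-th data word is shifted left by
# data_width * k bits before being summed into the final integer.
assert 0xAAAA + (0x5555 << 16) == 0x5555AAAA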
| 39.795389
| 80
| 0.599899
|
9725ab6e0678deec980e20b6f1e6bf9b8a15f3d9
| 5,909
|
py
|
Python
|
jiminy/gym/monitoring/tests/test_monitor.py
|
sibeshkar/jiminy
|
7754f86fb0f246e7d039ea0cbfd9950fcae4adfb
|
[
"MIT"
] | 3
|
2020-03-16T13:50:40.000Z
|
2021-06-09T05:26:13.000Z
|
jiminy/gym/monitoring/tests/test_monitor.py
|
sibeshkar/jiminy
|
7754f86fb0f246e7d039ea0cbfd9950fcae4adfb
|
[
"MIT"
] | null | null | null |
jiminy/gym/monitoring/tests/test_monitor.py
|
sibeshkar/jiminy
|
7754f86fb0f246e7d039ea0cbfd9950fcae4adfb
|
[
"MIT"
] | null | null | null |
import glob
import os
import jiminy.gym as gym
from jiminy.gym import error, spaces
from jiminy.gym import monitoring
from jiminy.gym.monitoring.tests import helpers
from jiminy.gym.wrappers import Monitor
from jiminy.gym.envs.registration import register
def test_monitor_filename():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env = Monitor(env, directory=temp)
env.close()
manifests = glob.glob(os.path.join(temp, '*.manifest.*'))
assert len(manifests) == 1
def test_write_upon_reset_false():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env = Monitor(env, directory=temp, video_callable=False, write_upon_reset=False)
env.reset()
files = glob.glob(os.path.join(temp, '*'))
assert not files, "Files: {}".format(files)
env.close()
files = glob.glob(os.path.join(temp, '*'))
assert len(files) > 0
def test_write_upon_reset_true():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env = Monitor(env, directory=temp, video_callable=False, write_upon_reset=True)
env.reset()
files = glob.glob(os.path.join(temp, '*'))
assert len(files) > 0, "Files: {}".format(files)
env.close()
files = glob.glob(os.path.join(temp, '*'))
assert len(files) > 0
def test_video_callable_true_not_allowed():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
try:
env = Monitor(env, temp, video_callable=True)
except error.Error:
pass
else:
assert False
def test_video_callable_false_does_not_record():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env = Monitor(env, temp, video_callable=False)
env.reset()
env.close()
results = monitoring.load_results(temp)
assert len(results['videos']) == 0
def test_video_callable_records_videos():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env = Monitor(env, temp)
env.reset()
env.close()
results = monitoring.load_results(temp)
assert len(results['videos']) == 1, "Videos: {}".format(results['videos'])
def test_semisuper_succeeds():
"""Regression test. Ensure that this can write"""
with helpers.tempdir() as temp:
env = gym.make('SemisuperPendulumDecay-v0')
env = Monitor(env, temp)
env.reset()
env.step(env.action_space.sample())
env.close()
class AutoresetEnv(gym.Env):
metadata = {'semantics.autoreset': True}
def __init__(self):
self.action_space = spaces.Discrete(1)
self.observation_space = spaces.Discrete(1)
def _reset(self):
return 0
def _step(self, action):
return 0, 0, False, {}
import logging
logger = logging.getLogger()
gym.envs.register(
id='Autoreset-v0',
entry_point='gym.monitoring.tests.test_monitor:AutoresetEnv',
max_episode_steps=2,
)
def test_env_reuse():
with helpers.tempdir() as temp:
env = gym.make('Autoreset-v0')
env = Monitor(env, temp)
env.reset()
_, _, done, _ = env.step(None)
assert not done
_, _, done, _ = env.step(None)
assert done
_, _, done, _ = env.step(None)
assert not done
_, _, done, _ = env.step(None)
assert done
env.close()
def test_no_monitor_reset_unless_done():
def assert_reset_raises(env):
errored = False
try:
env.reset()
except error.Error:
errored = True
assert errored, "Env allowed a reset when it shouldn't have"
with helpers.tempdir() as temp:
# Make sure we can reset as we please without monitor
env = gym.make('CartPole-v0')
env.reset()
env.step(env.action_space.sample())
env.step(env.action_space.sample())
env.reset()
# can reset once as soon as we start
env = Monitor(env, temp, video_callable=False)
env.reset()
# can reset multiple times in a row
env.reset()
env.reset()
env.step(env.action_space.sample())
env.step(env.action_space.sample())
assert_reset_raises(env)
# should allow resets after the episode is done
d = False
while not d:
_, _, d, _ = env.step(env.action_space.sample())
env.reset()
env.reset()
env.step(env.action_space.sample())
assert_reset_raises(env)
env.close()
def test_only_complete_episodes_written():
with helpers.tempdir() as temp:
env = gym.make('CartPole-v0')
env = Monitor(env, temp, video_callable=False)
env.reset()
d = False
while not d:
_, _, d, _ = env.step(env.action_space.sample())
env.reset()
env.step(env.action_space.sample())
env.close()
# Only 1 episode should be written
results = monitoring.load_results(temp)
assert len(results['episode_lengths']) == 1, "Found {} episodes written; expecting 1".format(len(results['episode_lengths']))
register(
id='test.StepsLimitCartpole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=2
)
def test_steps_limit_restart():
with helpers.tempdir() as temp:
env = gym.make('test.StepsLimitCartpole-v0')
env = Monitor(env, temp, video_callable=False)
env.reset()
# Episode has started
_, _, done, info = env.step(env.action_space.sample())
assert done == False
# Limit reached, now we get a done signal and the env resets itself
_, _, done, info = env.step(env.action_space.sample())
assert done == True
assert env.episode_id == 1
env.close()
| 28.684466
| 133
| 0.610763
|
78b3ffee082e86ccb9a2a8bc6587f1434891a371
| 4,670
|
py
|
Python
|
attendees/occasions/models/message_template.py
|
xjlin0/attendees32
|
25913c75ea8d916dcb065a23f2fa68bea558f77c
|
[
"MIT"
] | null | null | null |
attendees/occasions/models/message_template.py
|
xjlin0/attendees32
|
25913c75ea8d916dcb065a23f2fa68bea558f77c
|
[
"MIT"
] | 5
|
2022-01-21T03:26:40.000Z
|
2022-02-04T17:32:16.000Z
|
attendees/occasions/models/message_template.py
|
xjlin0/attendees32
|
25913c75ea8d916dcb065a23f2fa68bea558f77c
|
[
"MIT"
] | null | null | null |
import pghistory
from django.db import models
import django.utils.timezone
import model_utils.fields
from model_utils.models import SoftDeletableModel, TimeStampedModel
class MessageTemplate(TimeStampedModel, SoftDeletableModel):
id = models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
)
organization = models.ForeignKey(
"whereabouts.Organization", null=False, blank=False, on_delete=models.SET(0)
)
templates = models.JSONField(
null=True,
blank=True,
default=dict,
help_text='Example: {"body": "Dear {name}: Hello!"}. Whatever in curly braces will be interpolated by '
"variables, Please keep {} here even no data",
)
defaults = models.JSONField(
null=True,
blank=True,
default=dict,
help_text='Example: {"name": "John", "Date": "08/31/2020"}. Please keep {} here even no data',
)
type = models.SlugField(
max_length=50,
blank=False,
null=False,
unique=False,
help_text="format: Organization_slug-prefix-message-type-name",
)
class Meta:
db_table = "occasions_message_templates"
constraints = [
models.UniqueConstraint(
fields=["organization", "type"],
condition=models.Q(is_removed=False),
name="organization_type",
),
]
def __str__(self):
return "%s %s" % (self.organization, self.type)
class MessageTemplatesHistory(pghistory.get_event_model(
MessageTemplate,
pghistory.Snapshot('messagetemplate.snapshot'),
name='MessageTemplatesHistory',
related_name='history',
)):
pgh_id = models.BigAutoField(primary_key=True, serialize=False)
pgh_created_at = models.DateTimeField(auto_now_add=True)
pgh_label = models.TextField(help_text='The event label.')
pgh_obj = models.ForeignKey(db_constraint=False, on_delete=models.deletion.DO_NOTHING, related_name='history', to='occasions.messagetemplate')
id = models.BigIntegerField(db_index=True)
created = model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')
modified = model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')
is_removed = models.BooleanField(default=False)
organization = models.ForeignKey(db_constraint=False, on_delete=models.deletion.DO_NOTHING, related_name='+', related_query_name='+', to='whereabouts.organization')
    templates = models.JSONField(blank=True, default=dict, help_text='Example: {"body": "Dear {name}: Hello!"}. Whatever is in curly braces will be interpolated with variables. Please keep {} here even if there is no data', null=True)
    defaults = models.JSONField(blank=True, default=dict, help_text='Example: {"name": "John", "Date": "08/31/2020"}. Please keep {} here even if there is no data', null=True)
type = models.SlugField(db_index=False, help_text='format: Organization_slug-prefix-message-type-name')
pgh_context = models.ForeignKey(db_constraint=False, null=True, on_delete=models.deletion.DO_NOTHING, related_name='+', to='pghistory.context')
class Meta:
db_table = 'occasions_message_templateshistory'
# from django.db import models
# from django.contrib.postgres.fields.jsonb import JSONField
# from model_utils.models import TimeStampedModel, SoftDeletableModel
#
# from attendees.persons.models import Utility
#
#
# class Message(TimeStampedModel, SoftDeletableModel, Utility):
# id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
# obfuscated = models.BooleanField(null=False, blank=False, default=False)
# template = models.ForeignKey(to='occasions.MessageTemplate')
# summary = models.CharField(max_length=50, blank=False, null=False, unique=False)
# last_processed = models.DateTimeField(null=True, blank=True)
# status = models.CharField(max_length=50)
# variables = models.JSONField(
# null=True,
# blank=True,
# default=dict,
# help_text='Example: {"name": "John", "Date": "08/31/2020"}. Please keep {} here even no data'
# )
#
# class Meta:
# db_table = 'occasions_messages'
# constraints = [
# models.UniqueConstraint(
# fields=['organization', 'type'],
# condition=models.Q(is_removed=False),
# name='organization_type'
# ),
# ]
#
# def __str__(self):
# return '%s %s %s %s' % (self.organization, self.type, self.status, self.last_processed)
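# Illustrative sketch (not part of the model): one way the `templates` and
# `defaults` JSON fields documented in the help_text above could be combined at
# send time. The helper name and the merge order are assumptions for illustration.
def _render_body_example(message_template, **variables):
    values = {**(message_template.defaults or {}), **variables}
    return (message_template.templates or {}).get("body", "").format(**values)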
| 44.056604
| 217
| 0.686081
|
871aff3e13e77d4c09147528d5232177c8752dc3
| 2,830
|
py
|
Python
|
glue/utils/qt/tests/test_decorators.py
|
tiagopereira/glue
|
85bf7ce2d252d7bc405e8160b56fc83d46b9cbe4
|
[
"BSD-3-Clause"
] | 1
|
2019-12-17T07:58:35.000Z
|
2019-12-17T07:58:35.000Z
|
glue/utils/qt/tests/test_decorators.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | null | null | null |
glue/utils/qt/tests/test_decorators.py
|
scalet98/glue
|
ff949ad52e205c20561f48c05f870b2abb39e0b0
|
[
"BSD-3-Clause"
] | 1
|
2019-08-04T14:10:12.000Z
|
2019-08-04T14:10:12.000Z
|
from __future__ import absolute_import, division, print_function
import os
from mock import patch
from ..decorators import messagebox_on_error, die_on_error
def test_messagebox_on_error():
os.environ['GLUE_TESTING'] = 'False'
def failing_function():
raise ValueError("Dialog failure")
def working_function():
pass
@messagebox_on_error('An error occurred')
def decorated_failing_function():
failing_function()
@messagebox_on_error('An error occurred')
def decorated_working_function():
working_function()
# Test decorator
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_working_function()
assert mb.call_count == 0
# Test context manager
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with messagebox_on_error('An error occurred'):
failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with messagebox_on_error('An error occurred'):
working_function()
assert mb.call_count == 0
os.environ['GLUE_TESTING'] = 'True'
def test_die_on_error():
os.environ['GLUE_TESTING'] = 'False'
def failing_function():
raise ValueError("Dialog failure")
def working_function():
pass
@die_on_error('An error occurred')
def decorated_failing_function():
failing_function()
@die_on_error('An error occurred')
def decorated_working_function():
working_function()
# Test decorator
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
        exit.assert_called_once_with(1)
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
decorated_working_function()
assert mb.call_count == 0
assert exit.call_count == 0
# Test context manager
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with die_on_error('An error occurred'):
failing_function()
assert mb.call_args[0][2] == 'An error occurred\nDialog failure'
        exit.assert_called_once_with(1)
with patch('sys.exit') as exit:
with patch('qtpy.QtWidgets.QMessageBox') as mb:
with die_on_error('An error occurred'):
working_function()
assert mb.call_count == 0
assert exit.call_count == 0
os.environ['GLUE_TESTING'] = 'True'
| 28.019802
| 76
| 0.650883
|
df8dff30f80168a4ce29e4ccd6755b5acc1a1fdd
| 27,833
|
py
|
Python
|
test/dialect/oracle/test_reflection.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
test/dialect/oracle/test_reflection.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
test/dialect/oracle/test_reflection.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from sqlalchemy import exc
from sqlalchemy import FLOAT
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import INTEGER
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Numeric
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.oracle.base import BINARY_DOUBLE
from sqlalchemy.dialects.oracle.base import BINARY_FLOAT
from sqlalchemy.dialects.oracle.base import DOUBLE_PRECISION
from sqlalchemy.dialects.oracle.base import NUMBER
from sqlalchemy.testing import assert_warns
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class MultiSchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "oracle"
__backend__ = True
@classmethod
def setup_test_class(cls):
# currently assuming full DBA privs for the user.
# don't really know how else to go here unless
# we connect as the other user.
with testing.db.begin() as conn:
for stmt in (
"""
create table %(test_schema)s.parent(
id integer primary key,
data varchar2(50)
);
COMMENT ON TABLE %(test_schema)s.parent IS 'my table comment';
create table %(test_schema)s.child(
id integer primary key,
data varchar2(50),
parent_id integer references %(test_schema)s.parent(id)
);
create table local_table(
id integer primary key,
data varchar2(50)
);
create synonym %(test_schema)s.ptable for %(test_schema)s.parent;
create synonym %(test_schema)s.ctable for %(test_schema)s.child;
create synonym %(test_schema)s_pt for %(test_schema)s.parent;
create synonym %(test_schema)s.local_table for local_table;
-- can't make a ref from local schema to the
-- remote schema's table without this,
-- *and* can't give yourself a grant !
-- so we give it to public. ideas welcome.
grant references on %(test_schema)s.parent to public;
grant references on %(test_schema)s.child to public;
"""
% {"test_schema": testing.config.test_schema}
).split(";"):
if stmt.strip():
conn.exec_driver_sql(stmt)
@classmethod
def teardown_test_class(cls):
with testing.db.begin() as conn:
for stmt in (
"""
drop table %(test_schema)s.child;
drop table %(test_schema)s.parent;
drop table local_table;
drop synonym %(test_schema)s.ctable;
drop synonym %(test_schema)s.ptable;
drop synonym %(test_schema)s_pt;
drop synonym %(test_schema)s.local_table;
"""
% {"test_schema": testing.config.test_schema}
).split(";"):
if stmt.strip():
conn.exec_driver_sql(stmt)
def test_create_same_names_explicit_schema(self, metadata, connection):
schema = testing.db.dialect.default_schema_name
meta = metadata
parent = Table(
"parent",
meta,
Column("pid", Integer, primary_key=True),
schema=schema,
)
child = Table(
"child",
meta,
Column("cid", Integer, primary_key=True),
Column("pid", Integer, ForeignKey("%s.parent.pid" % schema)),
schema=schema,
)
meta.create_all(connection)
connection.execute(parent.insert(), {"pid": 1})
connection.execute(child.insert(), {"cid": 1, "pid": 1})
eq_(connection.execute(child.select()).fetchall(), [(1, 1)])
def test_reflect_alt_table_owner_local_synonym(self):
meta = MetaData()
parent = Table(
"%s_pt" % testing.config.test_schema,
meta,
autoload_with=testing.db,
oracle_resolve_synonyms=True,
)
self.assert_compile(
parent.select(),
"SELECT %(test_schema)s_pt.id, "
"%(test_schema)s_pt.data FROM %(test_schema)s_pt"
% {"test_schema": testing.config.test_schema},
)
def test_reflect_alt_synonym_owner_local_table(self):
meta = MetaData()
parent = Table(
"local_table",
meta,
autoload_with=testing.db,
oracle_resolve_synonyms=True,
schema=testing.config.test_schema,
)
self.assert_compile(
parent.select(),
"SELECT %(test_schema)s.local_table.id, "
"%(test_schema)s.local_table.data "
"FROM %(test_schema)s.local_table"
% {"test_schema": testing.config.test_schema},
)
def test_create_same_names_implicit_schema(self, metadata, connection):
meta = metadata
parent = Table(
"parent", meta, Column("pid", Integer, primary_key=True)
)
child = Table(
"child",
meta,
Column("cid", Integer, primary_key=True),
Column("pid", Integer, ForeignKey("parent.pid")),
)
meta.create_all(connection)
connection.execute(parent.insert(), {"pid": 1})
connection.execute(child.insert(), {"cid": 1, "pid": 1})
eq_(connection.execute(child.select()).fetchall(), [(1, 1)])
def test_reflect_alt_owner_explicit(self):
meta = MetaData()
parent = Table(
"parent",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
child = Table(
"child",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
self.assert_compile(
parent.join(child),
"%(test_schema)s.parent JOIN %(test_schema)s.child ON "
"%(test_schema)s.parent.id = %(test_schema)s.child.parent_id"
% {"test_schema": testing.config.test_schema},
)
with testing.db.connect() as conn:
conn.execute(
select(parent, child).select_from(parent.join(child))
).fetchall()
# check table comment (#5146)
eq_(parent.comment, "my table comment")
def test_reflect_table_comment(self, metadata, connection):
local_parent = Table(
"parent",
metadata,
Column("q", Integer),
comment="my local comment",
)
local_parent.create(connection)
insp = inspect(connection)
eq_(
insp.get_table_comment(
"parent", schema=testing.config.test_schema
),
{"text": "my table comment"},
)
eq_(
insp.get_table_comment(
"parent",
),
{"text": "my local comment"},
)
eq_(
insp.get_table_comment(
"parent", schema=connection.dialect.default_schema_name
),
{"text": "my local comment"},
)
def test_reflect_local_to_remote(self, connection):
connection.exec_driver_sql(
"CREATE TABLE localtable (id INTEGER "
"PRIMARY KEY, parent_id INTEGER REFERENCES "
"%(test_schema)s.parent(id))"
% {"test_schema": testing.config.test_schema},
)
try:
meta = MetaData()
lcl = Table("localtable", meta, autoload_with=testing.db)
parent = meta.tables["%s.parent" % testing.config.test_schema]
self.assert_compile(
parent.join(lcl),
"%(test_schema)s.parent JOIN localtable ON "
"%(test_schema)s.parent.id = "
"localtable.parent_id"
% {"test_schema": testing.config.test_schema},
)
finally:
connection.exec_driver_sql("DROP TABLE localtable")
def test_reflect_alt_owner_implicit(self):
meta = MetaData()
parent = Table(
"parent",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
child = Table(
"child",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
)
self.assert_compile(
parent.join(child),
"%(test_schema)s.parent JOIN %(test_schema)s.child "
"ON %(test_schema)s.parent.id = "
"%(test_schema)s.child.parent_id"
% {"test_schema": testing.config.test_schema},
)
with testing.db.connect() as conn:
conn.execute(
select(parent, child).select_from(parent.join(child))
).fetchall()
def test_reflect_alt_owner_synonyms(self, connection):
connection.exec_driver_sql(
"CREATE TABLE localtable (id INTEGER "
"PRIMARY KEY, parent_id INTEGER REFERENCES "
"%s.ptable(id))" % testing.config.test_schema,
)
try:
meta = MetaData()
lcl = Table(
"localtable",
meta,
autoload_with=connection,
oracle_resolve_synonyms=True,
)
parent = meta.tables["%s.ptable" % testing.config.test_schema]
self.assert_compile(
parent.join(lcl),
"%(test_schema)s.ptable JOIN localtable ON "
"%(test_schema)s.ptable.id = "
"localtable.parent_id"
% {"test_schema": testing.config.test_schema},
)
connection.execute(
select(parent, lcl).select_from(parent.join(lcl))
).fetchall()
finally:
connection.exec_driver_sql("DROP TABLE localtable")
def test_reflect_remote_synonyms(self):
meta = MetaData()
parent = Table(
"ptable",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
oracle_resolve_synonyms=True,
)
child = Table(
"ctable",
meta,
autoload_with=testing.db,
schema=testing.config.test_schema,
oracle_resolve_synonyms=True,
)
self.assert_compile(
parent.join(child),
"%(test_schema)s.ptable JOIN "
"%(test_schema)s.ctable "
"ON %(test_schema)s.ptable.id = "
"%(test_schema)s.ctable.parent_id"
% {"test_schema": testing.config.test_schema},
)
class ConstraintTest(fixtures.TablesTest):
__only_on__ = "oracle"
__backend__ = True
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table("foo", metadata, Column("id", Integer, primary_key=True))
def test_oracle_has_no_on_update_cascade(self, connection):
bar = Table(
"bar",
self.tables_test_metadata,
Column("id", Integer, primary_key=True),
Column(
"foo_id", Integer, ForeignKey("foo.id", onupdate="CASCADE")
),
)
assert_warns(exc.SAWarning, bar.create, connection)
bat = Table(
"bat",
self.tables_test_metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer),
ForeignKeyConstraint(["foo_id"], ["foo.id"], onupdate="CASCADE"),
)
assert_warns(exc.SAWarning, bat.create, connection)
def test_reflect_check_include_all(self, connection):
insp = inspect(connection)
eq_(insp.get_check_constraints("foo"), [])
eq_(
[
rec["sqltext"]
for rec in insp.get_check_constraints("foo", include_all=True)
],
['"ID" IS NOT NULL'],
)
class SystemTableTablenamesTest(fixtures.TestBase):
__only_on__ = "oracle"
__backend__ = True
def setup_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql("create table my_table (id integer)")
conn.exec_driver_sql(
"create global temporary table my_temp_table (id integer)",
)
conn.exec_driver_sql(
"create table foo_table (id integer) tablespace SYSTEM"
)
def teardown_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql("drop table my_temp_table")
conn.exec_driver_sql("drop table my_table")
conn.exec_driver_sql("drop table foo_table")
def test_table_names_no_system(self):
insp = inspect(testing.db)
eq_(insp.get_table_names(), ["my_table"])
def test_temp_table_names_no_system(self):
insp = inspect(testing.db)
eq_(insp.get_temp_table_names(), ["my_temp_table"])
def test_table_names_w_system(self):
engine = testing_engine(options={"exclude_tablespaces": ["FOO"]})
insp = inspect(engine)
eq_(
set(insp.get_table_names()).intersection(
["my_table", "foo_table"]
),
set(["my_table", "foo_table"]),
)
class DontReflectIOTTest(fixtures.TestBase):
"""test that index overflow tables aren't included in
table_names."""
__only_on__ = "oracle"
__backend__ = True
def setup_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql(
"""
CREATE TABLE admin_docindex(
token char(20),
doc_id NUMBER,
token_frequency NUMBER,
token_offsets VARCHAR2(2000),
CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
ORGANIZATION INDEX
TABLESPACE users
PCTTHRESHOLD 20
OVERFLOW TABLESPACE users
""",
)
def teardown_test(self):
with testing.db.begin() as conn:
conn.exec_driver_sql("drop table admin_docindex")
def test_reflect_all(self, connection):
m = MetaData()
m.reflect(connection)
eq_(set(t.name for t in m.tables.values()), set(["admin_docindex"]))
def all_tables_compression_missing():
with testing.db.connect() as conn:
if (
"Enterprise Edition"
not in conn.exec_driver_sql("select * from v$version").scalar()
# this works in Oracle Database 18c Express Edition Release
) and testing.db.dialect.server_version_info < (18,):
return True
return False
def all_tables_compress_for_missing():
with testing.db.connect() as conn:
if (
"Enterprise Edition"
not in conn.exec_driver_sql("select * from v$version").scalar()
):
return True
return False
class TableReflectionTest(fixtures.TestBase):
__only_on__ = "oracle"
__backend__ = True
@testing.fails_if(all_tables_compression_missing)
def test_reflect_basic_compression(self, metadata, connection):
tbl = Table(
"test_compress",
metadata,
Column("data", Integer, primary_key=True),
oracle_compress=True,
)
metadata.create_all(connection)
m2 = MetaData()
tbl = Table("test_compress", m2, autoload_with=connection)
# Don't hardcode the exact value, but it must be non-empty
assert tbl.dialect_options["oracle"]["compress"]
@testing.fails_if(all_tables_compress_for_missing)
def test_reflect_oltp_compression(self, metadata, connection):
tbl = Table(
"test_compress",
metadata,
Column("data", Integer, primary_key=True),
oracle_compress="OLTP",
)
metadata.create_all(connection)
m2 = MetaData()
tbl = Table("test_compress", m2, autoload_with=connection)
assert tbl.dialect_options["oracle"]["compress"] == "OLTP"
class RoundTripIndexTest(fixtures.TestBase):
__only_on__ = "oracle"
__backend__ = True
def test_no_pk(self, metadata, connection):
Table(
"sometable",
metadata,
Column("id_a", Unicode(255)),
Column("id_b", Unicode(255)),
Index("pk_idx_1", "id_a", "id_b", unique=True),
Index("pk_idx_2", "id_b", "id_a", unique=True),
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(
insp.get_indexes("sometable"),
[
{
"name": "pk_idx_1",
"column_names": ["id_a", "id_b"],
"dialect_options": {},
"unique": True,
},
{
"name": "pk_idx_2",
"column_names": ["id_b", "id_a"],
"dialect_options": {},
"unique": True,
},
],
)
@testing.combinations((True,), (False,), argnames="explicit_pk")
def test_include_indexes_resembling_pk(
self, metadata, connection, explicit_pk
):
t = Table(
"sometable",
metadata,
Column("id_a", Unicode(255), primary_key=True),
Column("id_b", Unicode(255), primary_key=True),
Column("group", Unicode(255), primary_key=True),
Column("col", Unicode(255)),
# Oracle won't let you do this unless the indexes have
# the columns in different order
Index("pk_idx_1", "id_b", "id_a", "group", unique=True),
Index("pk_idx_2", "id_b", "group", "id_a", unique=True),
)
if explicit_pk:
t.append_constraint(
PrimaryKeyConstraint(
"id_a", "id_b", "group", name="some_primary_key"
)
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(
insp.get_indexes("sometable"),
[
{
"name": "pk_idx_1",
"column_names": ["id_b", "id_a", "group"],
"dialect_options": {},
"unique": True,
},
{
"name": "pk_idx_2",
"column_names": ["id_b", "group", "id_a"],
"dialect_options": {},
"unique": True,
},
],
)
def test_reflect_fn_index(self, metadata, connection):
"""test reflection of a functional index.
it appears this emitted a warning at some point but does not right now.
the returned data is not exactly correct, but this is what it's
likely been doing for many years.
"""
s_table = Table(
"sometable",
metadata,
Column("group", Unicode(255), primary_key=True),
Column("col", Unicode(255)),
)
Index("data_idx", func.upper(s_table.c.col))
metadata.create_all(connection)
eq_(
inspect(connection).get_indexes("sometable"),
[
{
"column_names": [],
"dialect_options": {},
"name": "data_idx",
"unique": False,
}
],
)
def test_basic(self, metadata, connection):
s_table = Table(
"sometable",
metadata,
Column("id_a", Unicode(255), primary_key=True),
Column("id_b", Unicode(255), primary_key=True, unique=True),
Column("group", Unicode(255), primary_key=True),
Column("col", Unicode(255)),
UniqueConstraint("col", "group"),
)
# "group" is a keyword, so lower case
normalind = Index("tableind", s_table.c.id_b, s_table.c.group)
Index(
"compress1", s_table.c.id_a, s_table.c.id_b, oracle_compress=True
)
Index(
"compress2",
s_table.c.id_a,
s_table.c.id_b,
s_table.c.col,
oracle_compress=1,
)
metadata.create_all(connection)
mirror = MetaData()
mirror.reflect(connection)
metadata.drop_all(connection)
mirror.create_all(connection)
inspect = MetaData()
inspect.reflect(connection)
def obj_definition(obj):
return (
obj.__class__,
tuple([c.name for c in obj.columns]),
getattr(obj, "unique", None),
)
        # find what the primary key constraint name should be
primaryconsname = connection.scalar(
text(
"""SELECT constraint_name
FROM all_constraints
WHERE table_name = :table_name
AND owner = :owner
AND constraint_type = 'P' """
),
dict(
table_name=s_table.name.upper(),
owner=testing.db.dialect.default_schema_name.upper(),
),
)
reflectedtable = inspect.tables[s_table.name]
# make a dictionary of the reflected objects:
reflected = dict(
[
(obj_definition(i), i)
for i in reflectedtable.indexes | reflectedtable.constraints
]
)
        # assert we got the primary key constraint and its name
        # (raises KeyError if it is not in the dict)
assert (
reflected[
(PrimaryKeyConstraint, ("id_a", "id_b", "group"), None)
].name.upper()
== primaryconsname.upper()
)
        # raises KeyError if not in the dict
eq_(reflected[(Index, ("id_b", "group"), False)].name, normalind.name)
assert (Index, ("id_b",), True) in reflected
assert (Index, ("col", "group"), True) in reflected
idx = reflected[(Index, ("id_a", "id_b"), False)]
assert idx.dialect_options["oracle"]["compress"] == 2
idx = reflected[(Index, ("id_a", "id_b", "col"), False)]
assert idx.dialect_options["oracle"]["compress"] == 1
eq_(len(reflectedtable.constraints), 1)
eq_(len(reflectedtable.indexes), 5)
class DBLinkReflectionTest(fixtures.TestBase):
__requires__ = ("oracle_test_dblink",)
__only_on__ = "oracle"
__backend__ = True
@classmethod
def setup_test_class(cls):
from sqlalchemy.testing import config
cls.dblink = config.file_config.get("sqla_testing", "oracle_db_link")
# note that the synonym here is still not totally functional
# when accessing via a different username as we do with the
# multiprocess test suite, so testing here is minimal
with testing.db.begin() as conn:
conn.exec_driver_sql(
"create table test_table "
"(id integer primary key, data varchar2(50))"
)
conn.exec_driver_sql(
"create synonym test_table_syn "
"for test_table@%s" % cls.dblink
)
@classmethod
def teardown_test_class(cls):
with testing.db.begin() as conn:
conn.exec_driver_sql("drop synonym test_table_syn")
conn.exec_driver_sql("drop table test_table")
def test_reflection(self):
"""test the resolution of the synonym/dblink."""
m = MetaData()
t = Table(
"test_table_syn",
m,
autoload_with=testing.db,
oracle_resolve_synonyms=True,
)
eq_(list(t.c.keys()), ["id", "data"])
eq_(list(t.primary_key), [t.c.id])
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "oracle"
__backend__ = True
def _run_test(self, metadata, connection, specs, attributes):
columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
m = metadata
Table("oracle_types", m, *columns)
m.create_all(connection)
m2 = MetaData()
table = Table("oracle_types", m2, autoload_with=connection)
for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
expected_spec = spec[1]
reflected_type = reflected_col.type
is_(type(reflected_type), type(expected_spec))
for attr in attributes:
eq_(
getattr(reflected_type, attr),
getattr(expected_spec, attr),
"Column %s: Attribute %s value of %s does not "
"match %s for type %s"
% (
"c%i" % (i + 1),
attr,
getattr(reflected_type, attr),
getattr(expected_spec, attr),
spec[0],
),
)
def test_integer_types(self, metadata, connection):
specs = [(Integer, INTEGER()), (Numeric, INTEGER())]
self._run_test(metadata, connection, specs, [])
def test_number_types(
self,
metadata,
connection,
):
specs = [(Numeric(5, 2), NUMBER(5, 2)), (NUMBER, NUMBER())]
self._run_test(metadata, connection, specs, ["precision", "scale"])
def test_float_types(
self,
metadata,
connection,
):
specs = [
(DOUBLE_PRECISION(), FLOAT()),
# when binary_precision is supported
# (DOUBLE_PRECISION(), oracle.FLOAT(binary_precision=126)),
(BINARY_DOUBLE(), BINARY_DOUBLE()),
(BINARY_FLOAT(), BINARY_FLOAT()),
(FLOAT(5), FLOAT()),
# when binary_precision is supported
# (FLOAT(5), oracle.FLOAT(binary_precision=5),),
(FLOAT(), FLOAT()),
# when binary_precision is supported
# (FLOAT(5), oracle.FLOAT(binary_precision=126),),
]
self._run_test(metadata, connection, specs, ["precision"])
class IdentityReflectionTest(fixtures.TablesTest):
__only_on__ = "oracle"
__backend__ = True
__requires__ = ("identity_columns",)
@classmethod
def define_tables(cls, metadata):
Table("t1", metadata, Column("id1", Integer, Identity(on_null=True)))
Table("t2", metadata, Column("id2", Integer, Identity(order=True)))
def test_reflect_identity(self):
insp = inspect(testing.db)
common = {
"always": False,
"start": 1,
"increment": 1,
"on_null": False,
"maxvalue": 10 ** 28 - 1,
"minvalue": 1,
"cycle": False,
"cache": 20,
"order": False,
}
for col in insp.get_columns("t1") + insp.get_columns("t2"):
if col["name"] == "id1":
is_true("identity" in col)
exp = common.copy()
exp["on_null"] = True
eq_(col["identity"], exp)
if col["name"] == "id2":
is_true("identity" in col)
exp = common.copy()
exp["order"] = True
eq_(col["identity"], exp)
| 32.553216
| 79
| 0.555348
|